1 """Python wrappers around TensorFlow ops.
   2 
   3 This file is MACHINE GENERATED! Do not edit.
   4 Original C++ source file: data_flow_ops.cc
   5 """
   6 
   7 import collections as _collections
   8 import six as _six
   9 
  10 from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
  11 from tensorflow.python.eager import context as _context
  12 from tensorflow.python.eager import core as _core
  13 from tensorflow.python.eager import execute as _execute
  14 from tensorflow.python.framework import dtypes as _dtypes
  15 from tensorflow.python.framework import errors as _errors
  16 from tensorflow.python.framework import tensor_shape as _tensor_shape
  17 
  18 from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
  19 # Needed to trigger the call to _set_call_cpp_shape_fn.
  20 from tensorflow.python.framework import common_shapes as _common_shapes
  21 from tensorflow.python.framework import op_def_registry as _op_def_registry
  22 from tensorflow.python.framework import ops as _ops
  23 from tensorflow.python.framework import op_def_library as _op_def_library
  24 from tensorflow.python.util.deprecation import deprecated_endpoints
  25 from tensorflow.python.util.tf_export import tf_export
  26 
  27 
  28 def accumulator_apply_gradient(handle, local_step, gradient, name=None):
  29   r"""Applies a gradient to a given accumulator.
  30 
  31   Does not add if local_step is lesser than the accumulator's global_step.
  32 
  33   Args:
  34     handle: A `Tensor` of type mutable `string`. The handle to a accumulator.
  35     local_step: A `Tensor` of type `int64`.
  36       The local_step value at which the gradient was computed.
  37     gradient: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
  38       A tensor of the gradient to be accumulated.
  39     name: A name for the operation (optional).
  40 
  41   Returns:
  42     The created Operation.
  43   """
  44   _ctx = _context._context
  45   if _ctx is None or not _ctx._eager_context.is_eager:
  46     _, _, _op = _op_def_lib._apply_op_helper(
  47         "AccumulatorApplyGradient", handle=handle, local_step=local_step,
  48         gradient=gradient, name=name)
  49     return _op
  50     _result = None
  51     return _result
  52 
  53   else:
  54     raise RuntimeError("accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
  55 
  56 
  57   raise RuntimeError("accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
  58 
  59 def accumulator_num_accumulated(handle, name=None):
  60   r"""Returns the number of gradients aggregated in the given accumulators.
  61 
  62   Args:
  63     handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
  64     name: A name for the operation (optional).
  65 
  66   Returns:
  67     A `Tensor` of type `int32`.
  68   """
  69   _ctx = _context._context
  70   if _ctx is None or not _ctx._eager_context.is_eager:
  71     _, _, _op = _op_def_lib._apply_op_helper(
  72         "AccumulatorNumAccumulated", handle=handle, name=name)
  73     _result = _op.outputs[:]
  74     _inputs_flat = _op.inputs
  75     _attrs = None
  76     _execute.record_gradient(
  77       "AccumulatorNumAccumulated", _inputs_flat, _attrs, _result, name)
  78     _result, = _result
  79     return _result
  80 
  81   else:
  82     raise RuntimeError("accumulator_num_accumulated op does not support eager execution. Arg 'handle' is a ref.")
  83 
  84 
  85   raise RuntimeError("accumulator_num_accumulated op does not support eager execution. Arg 'handle' is a ref.")
  86 
  87 def accumulator_set_global_step(handle, new_global_step, name=None):
  88   r"""Updates the accumulator with a new value for global_step.
  89 
  90   Logs warning if the accumulator's value is already higher than
  91   new_global_step.
  92 
  93   Args:
  94     handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
  95     new_global_step: A `Tensor` of type `int64`.
  96       The new global_step value to set.
  97     name: A name for the operation (optional).
  98 
  99   Returns:
 100     The created Operation.
 101   """
 102   _ctx = _context._context
 103   if _ctx is None or not _ctx._eager_context.is_eager:
 104     _, _, _op = _op_def_lib._apply_op_helper(
 105         "AccumulatorSetGlobalStep", handle=handle,
 106         new_global_step=new_global_step, name=name)
 107     return _op
 108     _result = None
 109     return _result
 110 
 111   else:
 112     raise RuntimeError("accumulator_set_global_step op does not support eager execution. Arg 'handle' is a ref.")
 113 
 114 
 115   raise RuntimeError("accumulator_set_global_step op does not support eager execution. Arg 'handle' is a ref.")
 116 
 117 def accumulator_take_gradient(handle, num_required, dtype, name=None):
 118   r"""Extracts the average gradient in the given ConditionalAccumulator.
 119 
 120   The op blocks until sufficient (i.e., more than num_required)
 121   gradients have been accumulated.  If the accumulator has already
 122   aggregated more than num_required gradients, it returns the average of
 123   the accumulated gradients.  Also automatically increments the recorded
 124   global_step in the accumulator by 1, and resets the aggregate to 0.
 125 
 126   Args:
 127     handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
 128     num_required: A `Tensor` of type `int32`.
 129       Number of gradients required before we return an aggregate.
 130     dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
 131       The data type of accumulated gradients. Needs to correspond to the type
 132       of the accumulator.
 133     name: A name for the operation (optional).
 134 
 135   Returns:
 136     A `Tensor` of type `dtype`.
 137   """
 138   _ctx = _context._context
 139   if _ctx is None or not _ctx._eager_context.is_eager:
 140     dtype = _execute.make_type(dtype, "dtype")
 141     _, _, _op = _op_def_lib._apply_op_helper(
 142         "AccumulatorTakeGradient", handle=handle, num_required=num_required,
 143         dtype=dtype, name=name)
 144     _result = _op.outputs[:]
 145     _inputs_flat = _op.inputs
 146     _attrs = ("dtype", _op.get_attr("dtype"))
 147     _execute.record_gradient(
 148       "AccumulatorTakeGradient", _inputs_flat, _attrs, _result, name)
 149     _result, = _result
 150     return _result
 151 
 152   else:
 153     raise RuntimeError("accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
 154 
 155 
 156   raise RuntimeError("accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
 157 
 158 def barrier(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
 159   r"""Defines a barrier that persists across different graph executions.
 160 
 161   A barrier represents a key-value map, where each key is a string, and
 162   each value is a tuple of tensors.
 163 
 164   At runtime, the barrier contains 'complete' and 'incomplete'
 165   elements. A complete element has defined tensors for all components of
 166   its value tuple, and may be accessed using BarrierTakeMany. An
 167   incomplete element has some undefined components in its value tuple,
 168   and may be updated using BarrierInsertMany.
 169 
 170   Args:
 171     component_types: A list of `tf.DTypes` that has length `>= 1`.
 172       The type of each component in a value.
 173     shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
 174       The shape of each component in a value. Each shape must be 1 in the
 175       first dimension. The length of this attr must be the same as the length of
 176       component_types.
 177     capacity: An optional `int`. Defaults to `-1`.
 178       The capacity of the barrier.  The default capacity is MAX_INT32,
 179       which is the largest capacity of the underlying queue.
 180     container: An optional `string`. Defaults to `""`.
 181       If non-empty, this barrier is placed in the given container.
 182       Otherwise, a default container is used.
 183     shared_name: An optional `string`. Defaults to `""`.
 184       If non-empty, this barrier will be shared under the given name
 185       across multiple sessions.
 186     name: A name for the operation (optional).
 187 
 188   Returns:
 189     A `Tensor` of type mutable `string`.
 190   """
 191   _ctx = _context._context
 192   if _ctx is None or not _ctx._eager_context.is_eager:
 193     if not isinstance(component_types, (list, tuple)):
 194       raise TypeError(
 195           "Expected list for 'component_types' argument to "
 196           "'barrier' Op, not %r." % component_types)
 197     component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
 198     if shapes is None:
 199       shapes = []
 200     if not isinstance(shapes, (list, tuple)):
 201       raise TypeError(
 202           "Expected list for 'shapes' argument to "
 203           "'barrier' Op, not %r." % shapes)
 204     shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
 205     if capacity is None:
 206       capacity = -1
 207     capacity = _execute.make_int(capacity, "capacity")
 208     if container is None:
 209       container = ""
 210     container = _execute.make_str(container, "container")
 211     if shared_name is None:
 212       shared_name = ""
 213     shared_name = _execute.make_str(shared_name, "shared_name")
 214     _, _, _op = _op_def_lib._apply_op_helper(
 215         "Barrier", component_types=component_types, shapes=shapes,
 216         capacity=capacity, container=container, shared_name=shared_name,
 217         name=name)
 218     _result = _op.outputs[:]
 219     _inputs_flat = _op.inputs
 220     _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
 221               _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
 222               "container", _op.get_attr("container"), "shared_name",
 223               _op.get_attr("shared_name"))
 224     _execute.record_gradient(
 225       "Barrier", _inputs_flat, _attrs, _result, name)
 226     _result, = _result
 227     return _result
 228 
 229   else:
 230     raise RuntimeError("barrier op does not support eager execution. Arg 'handle' is a ref.")
 231 
 232 
 233   raise RuntimeError("barrier op does not support eager execution. Arg 'handle' is a ref.")
 234 
 235 def barrier_close(handle, cancel_pending_enqueues=False, name=None):
 236   r"""Closes the given barrier.
 237 
 238   This operation signals that no more new elements will be inserted in the
 239   given barrier. Subsequent InsertMany that try to introduce a new key will fail.
 240   Subsequent InsertMany operations that just add missing components to already
 241   existing elements will continue to succeed. Subsequent TakeMany operations will
 242   continue to succeed if sufficient completed elements remain in the barrier.
 243   Subsequent TakeMany operations that would block will fail immediately.
 244 
 245   Args:
 246     handle: A `Tensor` of type mutable `string`. The handle to a barrier.
 247     cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
 248       If true, all pending enqueue requests that are
 249       blocked on the barrier's queue will be canceled. InsertMany will fail, even
 250       if no new key is introduced.
 251     name: A name for the operation (optional).
 252 
 253   Returns:
 254     The created Operation.
 255   """
 256   _ctx = _context._context
 257   if _ctx is None or not _ctx._eager_context.is_eager:
 258     if cancel_pending_enqueues is None:
 259       cancel_pending_enqueues = False
 260     cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
 261     _, _, _op = _op_def_lib._apply_op_helper(
 262         "BarrierClose", handle=handle,
 263         cancel_pending_enqueues=cancel_pending_enqueues, name=name)
 264     return _op
 265     _result = None
 266     return _result
 267 
 268   else:
 269     raise RuntimeError("barrier_close op does not support eager execution. Arg 'handle' is a ref.")
 270 
 271 
 272   raise RuntimeError("barrier_close op does not support eager execution. Arg 'handle' is a ref.")
 273 
 274 def barrier_incomplete_size(handle, name=None):
 275   r"""Computes the number of incomplete elements in the given barrier.
 276 
 277   Args:
 278     handle: A `Tensor` of type mutable `string`. The handle to a barrier.
 279     name: A name for the operation (optional).
 280 
 281   Returns:
 282     A `Tensor` of type `int32`.
 283   """
 284   _ctx = _context._context
 285   if _ctx is None or not _ctx._eager_context.is_eager:
 286     _, _, _op = _op_def_lib._apply_op_helper(
 287         "BarrierIncompleteSize", handle=handle, name=name)
 288     _result = _op.outputs[:]
 289     _inputs_flat = _op.inputs
 290     _attrs = None
 291     _execute.record_gradient(
 292       "BarrierIncompleteSize", _inputs_flat, _attrs, _result, name)
 293     _result, = _result
 294     return _result
 295 
 296   else:
 297     raise RuntimeError("barrier_incomplete_size op does not support eager execution. Arg 'handle' is a ref.")
 298 
 299 
 300   raise RuntimeError("barrier_incomplete_size op does not support eager execution. Arg 'handle' is a ref.")
 301 
 302 def barrier_insert_many(handle, keys, values, component_index, name=None):
 303   r"""For each key, assigns the respective value to the specified component.
 304 
 305   If a key is not found in the barrier, this operation will create a new
 306   incomplete element. If a key is found in the barrier, and the element
 307   already has a value at component_index, this operation will fail with
 308   INVALID_ARGUMENT, and leave the barrier in an undefined state.
 309 
 310   Args:
 311     handle: A `Tensor` of type mutable `string`. The handle to a barrier.
 312     keys: A `Tensor` of type `string`.
 313       A one-dimensional tensor of keys, with length n.
 314     values: A `Tensor`.
 315       An any-dimensional tensor of values, which are associated with the
 316       respective keys. The 0th dimension must have length n.
 317     component_index: An `int`.
 318       The component of the barrier elements that is being assigned.
 319     name: A name for the operation (optional).
 320 
 321   Returns:
 322     The created Operation.
 323   """
 324   _ctx = _context._context
 325   if _ctx is None or not _ctx._eager_context.is_eager:
 326     component_index = _execute.make_int(component_index, "component_index")
 327     _, _, _op = _op_def_lib._apply_op_helper(
 328         "BarrierInsertMany", handle=handle, keys=keys, values=values,
 329         component_index=component_index, name=name)
 330     return _op
 331     _result = None
 332     return _result
 333 
 334   else:
 335     raise RuntimeError("barrier_insert_many op does not support eager execution. Arg 'handle' is a ref.")
 336 
 337 
 338   raise RuntimeError("barrier_insert_many op does not support eager execution. Arg 'handle' is a ref.")
 339 
 340 def barrier_ready_size(handle, name=None):
 341   r"""Computes the number of complete elements in the given barrier.
 342 
 343   Args:
 344     handle: A `Tensor` of type mutable `string`. The handle to a barrier.
 345     name: A name for the operation (optional).
 346 
 347   Returns:
 348     A `Tensor` of type `int32`.
 349   """
 350   _ctx = _context._context
 351   if _ctx is None or not _ctx._eager_context.is_eager:
 352     _, _, _op = _op_def_lib._apply_op_helper(
 353         "BarrierReadySize", handle=handle, name=name)
 354     _result = _op.outputs[:]
 355     _inputs_flat = _op.inputs
 356     _attrs = None
 357     _execute.record_gradient(
 358       "BarrierReadySize", _inputs_flat, _attrs, _result, name)
 359     _result, = _result
 360     return _result
 361 
 362   else:
 363     raise RuntimeError("barrier_ready_size op does not support eager execution. Arg 'handle' is a ref.")
 364 
 365 
 366   raise RuntimeError("barrier_ready_size op does not support eager execution. Arg 'handle' is a ref.")
 367 
# Output names and result container for barrier_take_many: the op returns a
# namedtuple of (indices, keys, values).
_barrier_take_many_outputs = ["indices", "keys", "values"]
_BarrierTakeManyOutput = _collections.namedtuple(
    "BarrierTakeMany", _barrier_take_many_outputs)
 371 
 372 
 373 def barrier_take_many(handle, num_elements, component_types, allow_small_batch=False, wait_for_incomplete=False, timeout_ms=-1, name=None):
 374   r"""Takes the given number of completed elements from a barrier.
 375 
 376   This operation concatenates completed-element component tensors along
 377   the 0th dimension to make a single component tensor.
 378 
 379   Elements come out of the barrier when they are complete, and in the order
 380   in which they were placed into the barrier.  The indices output provides
 381   information about the batch in which each element was originally inserted
 382   into the barrier.
 383 
 384   Args:
 385     handle: A `Tensor` of type mutable `string`. The handle to a barrier.
 386     num_elements: A `Tensor` of type `int32`.
 387       A single-element tensor containing the number of elements to
 388       take.
 389     component_types: A list of `tf.DTypes` that has length `>= 1`.
 390       The type of each component in a value.
 391     allow_small_batch: An optional `bool`. Defaults to `False`.
 392       Allow to return less than num_elements items if barrier is
 393       already closed.
 394     wait_for_incomplete: An optional `bool`. Defaults to `False`.
 395     timeout_ms: An optional `int`. Defaults to `-1`.
 396       If the queue is empty, this operation will block for up to
 397       timeout_ms milliseconds.
 398       Note: This option is not supported yet.
 399     name: A name for the operation (optional).
 400 
 401   Returns:
 402     A tuple of `Tensor` objects (indices, keys, values).
 403 
 404     indices: A `Tensor` of type `int64`.
 405     keys: A `Tensor` of type `string`.
 406     values: A list of `Tensor` objects of type `component_types`.
 407   """
 408   _ctx = _context._context
 409   if _ctx is None or not _ctx._eager_context.is_eager:
 410     if not isinstance(component_types, (list, tuple)):
 411       raise TypeError(
 412           "Expected list for 'component_types' argument to "
 413           "'barrier_take_many' Op, not %r." % component_types)
 414     component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
 415     if allow_small_batch is None:
 416       allow_small_batch = False
 417     allow_small_batch = _execute.make_bool(allow_small_batch, "allow_small_batch")
 418     if wait_for_incomplete is None:
 419       wait_for_incomplete = False
 420     wait_for_incomplete = _execute.make_bool(wait_for_incomplete, "wait_for_incomplete")
 421     if timeout_ms is None:
 422       timeout_ms = -1
 423     timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
 424     _, _, _op = _op_def_lib._apply_op_helper(
 425         "BarrierTakeMany", handle=handle, num_elements=num_elements,
 426         component_types=component_types, allow_small_batch=allow_small_batch,
 427         wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms,
 428         name=name)
 429     _result = _op.outputs[:]
 430     _inputs_flat = _op.inputs
 431     _attrs = ("component_types", _op.get_attr("component_types"),
 432               "allow_small_batch", _op.get_attr("allow_small_batch"),
 433               "wait_for_incomplete", _op.get_attr("wait_for_incomplete"),
 434               "timeout_ms", _op.get_attr("timeout_ms"))
 435     _execute.record_gradient(
 436       "BarrierTakeMany", _inputs_flat, _attrs, _result, name)
 437     _result = _result[:2] + [_result[2:]]
 438     _result = _BarrierTakeManyOutput._make(_result)
 439     return _result
 440 
 441   else:
 442     raise RuntimeError("barrier_take_many op does not support eager execution. Arg 'handle' is a ref.")
 443 
 444 
 445   raise RuntimeError("barrier_take_many op does not support eager execution. Arg 'handle' is a ref.")
 446 
 447 def conditional_accumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
 448   r"""A conditional accumulator for aggregating gradients.
 449 
 450   The accumulator accepts gradients marked with local_step greater or
 451   equal to the most recent global_step known to the accumulator. The
 452   average can be extracted from the accumulator, provided sufficient
 453   gradients have been accumulated. Extracting the average automatically
 454   resets the aggregate to 0, and increments the global_step recorded by
 455   the accumulator.
 456 
 457   Args:
 458     dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
 459       The type of the value being accumulated.
 460     shape: A `tf.TensorShape` or list of `ints`.
 461       The shape of the values, can be [], in which case shape is unknown.
 462     container: An optional `string`. Defaults to `""`.
 463       If non-empty, this accumulator is placed in the given container.
 464       Otherwise, a default container is used.
 465     shared_name: An optional `string`. Defaults to `""`.
 466       If non-empty, this accumulator will be shared under the
 467       given name across multiple sessions.
 468     reduction_type: An optional `string` from: `"MEAN", "SUM"`. Defaults to `"MEAN"`.
 469     name: A name for the operation (optional).
 470 
 471   Returns:
 472     A `Tensor` of type mutable `string`.
 473   """
 474   _ctx = _context._context
 475   if _ctx is None or not _ctx._eager_context.is_eager:
 476     dtype = _execute.make_type(dtype, "dtype")
 477     shape = _execute.make_shape(shape, "shape")
 478     if container is None:
 479       container = ""
 480     container = _execute.make_str(container, "container")
 481     if shared_name is None:
 482       shared_name = ""
 483     shared_name = _execute.make_str(shared_name, "shared_name")
 484     if reduction_type is None:
 485       reduction_type = "MEAN"
 486     reduction_type = _execute.make_str(reduction_type, "reduction_type")
 487     _, _, _op = _op_def_lib._apply_op_helper(
 488         "ConditionalAccumulator", dtype=dtype, shape=shape,
 489         container=container, shared_name=shared_name,
 490         reduction_type=reduction_type, name=name)
 491     _result = _op.outputs[:]
 492     _inputs_flat = _op.inputs
 493     _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"),
 494               "container", _op.get_attr("container"), "shared_name",
 495               _op.get_attr("shared_name"), "reduction_type",
 496               _op.get_attr("reduction_type"))
 497     _execute.record_gradient(
 498       "ConditionalAccumulator", _inputs_flat, _attrs, _result, name)
 499     _result, = _result
 500     return _result
 501 
 502   else:
 503     raise RuntimeError("conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
 504 
 505 
 506   raise RuntimeError("conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
 507 
 508 def delete_session_tensor(handle, name=None):
 509   r"""Delete the tensor specified by its handle in the session.
 510 
 511   Args:
 512     handle: A `Tensor` of type `string`.
 513       The handle for a tensor stored in the session state.
 514     name: A name for the operation (optional).
 515 
 516   Returns:
 517     The created Operation.
 518   """
 519   _ctx = _context._context
 520   if _ctx is None or not _ctx._eager_context.is_eager:
 521     _, _, _op = _op_def_lib._apply_op_helper(
 522         "DeleteSessionTensor", handle=handle, name=name)
 523     return _op
 524     _result = None
 525     return _result
 526 
 527   else:
 528     try:
 529       _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
 530         _ctx._context_handle, _ctx._eager_context.device_name,
 531         "DeleteSessionTensor", name, _ctx._post_execution_callbacks, handle)
 532       return _result
 533     except _core._FallbackException:
 534       return delete_session_tensor_eager_fallback(
 535           handle, name=name, ctx=_ctx)
 536     except _core._NotOkStatusException as e:
 537       if name is not None:
 538         message = e.message + " name: " + name
 539       else:
 540         message = e.message
 541       _six.raise_from(_core._status_to_exception(e.code, message), None)
 542 
 543 
 544 def delete_session_tensor_eager_fallback(handle, name=None, ctx=None):
 545   r"""This is the slowpath function for Eager mode.
 546   This is for function delete_session_tensor
 547   """
 548   _ctx = ctx if ctx else _context.context()
 549   handle = _ops.convert_to_tensor(handle, _dtypes.string)
 550   _inputs_flat = [handle]
 551   _attrs = None
 552   _result = _execute.execute(b"DeleteSessionTensor", 0, inputs=_inputs_flat,
 553                              attrs=_attrs, ctx=_ctx, name=name)
 554   _result = None
 555   return _result
 556 
 557 
 558 @tf_export('dynamic_partition')
 559 def dynamic_partition(data, partitions, num_partitions, name=None):
 560   r"""Partitions `data` into `num_partitions` tensors using indices from `partitions`.
 561 
 562   For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
 563   becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
 564   are placed in `outputs[i]` in lexicographic order of `js`, and the first
 565   dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
 566   In detail,
 567 
 568   ```python
 569       outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
 570 
 571       outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
 572   ```
 573 
 574   `data.shape` must start with `partitions.shape`.
 575 
 576   For example:
 577 
 578   ```python
 579       # Scalar partitions.
 580       partitions = 1
 581       num_partitions = 2
 582       data = [10, 20]
 583       outputs[0] = []  # Empty with shape [0, 2]
 584       outputs[1] = [[10, 20]]
 585 
 586       # Vector partitions.
 587       partitions = [0, 0, 1, 1, 0]
 588       num_partitions = 2
 589       data = [10, 20, 30, 40, 50]
 590       outputs[0] = [10, 20, 50]
 591       outputs[1] = [30, 40]
 592   ```
 593 
 594   See `dynamic_stitch` for an example on how to merge partitions back.
 595 
 596   <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
 597   <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
 598   </div>
 599 
 600   Args:
 601     data: A `Tensor`.
 602     partitions: A `Tensor` of type `int32`.
 603       Any shape.  Indices in the range `[0, num_partitions)`.
 604     num_partitions: An `int` that is `>= 1`.
 605       The number of partitions to output.
 606     name: A name for the operation (optional).
 607 
 608   Returns:
 609     A list of `num_partitions` `Tensor` objects with the same type as `data`.
 610   """
 611   _ctx = _context._context
 612   if _ctx is None or not _ctx._eager_context.is_eager:
 613     num_partitions = _execute.make_int(num_partitions, "num_partitions")
 614     _, _, _op = _op_def_lib._apply_op_helper(
 615         "DynamicPartition", data=data, partitions=partitions,
 616         num_partitions=num_partitions, name=name)
 617     _result = _op.outputs[:]
 618     _inputs_flat = _op.inputs
 619     _attrs = ("num_partitions", _op.get_attr("num_partitions"), "T",
 620               _op.get_attr("T"))
 621     _execute.record_gradient(
 622       "DynamicPartition", _inputs_flat, _attrs, _result, name)
 623     return _result
 624 
 625   else:
 626     try:
 627       _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
 628         _ctx._context_handle, _ctx._eager_context.device_name,
 629         "DynamicPartition", name, _ctx._post_execution_callbacks, data,
 630         partitions, "num_partitions", num_partitions)
 631       return _result
 632     except _core._FallbackException:
 633       return dynamic_partition_eager_fallback(
 634           data, partitions, num_partitions=num_partitions, name=name,
 635           ctx=_ctx)
 636     except _core._NotOkStatusException as e:
 637       if name is not None:
 638         message = e.message + " name: " + name
 639       else:
 640         message = e.message
 641       _six.raise_from(_core._status_to_exception(e.code, message), None)
 642 
 643 
 644 def dynamic_partition_eager_fallback(data, partitions, num_partitions, name=None, ctx=None):
 645   r"""This is the slowpath function for Eager mode.
 646   This is for function dynamic_partition
 647   """
 648   _ctx = ctx if ctx else _context.context()
 649   num_partitions = _execute.make_int(num_partitions, "num_partitions")
 650   _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
 651   partitions = _ops.convert_to_tensor(partitions, _dtypes.int32)
 652   _inputs_flat = [data, partitions]
 653   _attrs = ("num_partitions", num_partitions, "T", _attr_T)
 654   _result = _execute.execute(b"DynamicPartition", num_partitions,
 655                              inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
 656                              name=name)
 657   _execute.record_gradient(
 658       "DynamicPartition", _inputs_flat, _attrs, _result, name)
 659   return _result
 660 
 661 
 662 @tf_export('dynamic_stitch')
 663 def dynamic_stitch(indices, data, name=None):
 664   r"""Interleave the values from the `data` tensors into a single tensor.
 665 
 666   Builds a merged tensor such that
 667 
 668   ```python
 669       merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
 670   ```
 671 
 672   For example, if each `indices[m]` is scalar or vector, we have
 673 
 674   ```python
 675       # Scalar indices:
 676       merged[indices[m], ...] = data[m][...]
 677 
 678       # Vector indices:
 679       merged[indices[m][i], ...] = data[m][i, ...]
 680   ```
 681 
 682   Each `data[i].shape` must start with the corresponding `indices[i].shape`,
 683   and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
 684   must have `data[i].shape = indices[i].shape + constant`.  In terms of this
 685   `constant`, the output shape is
 686 
 687       merged.shape = [max(indices)] + constant
 688 
 689   Values are merged in order, so if an index appears in both `indices[m][i]` and
 690   `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
 691   merged result. If you do not need this guarantee, ParallelDynamicStitch might
 692   perform better on some devices.
 693 
 694   For example:
 695 
 696   ```python
 697       indices[0] = 6
 698       indices[1] = [4, 1]
 699       indices[2] = [[5, 2], [0, 3]]
 700       data[0] = [61, 62]
 701       data[1] = [[41, 42], [11, 12]]
 702       data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
 703       merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
 704                 [51, 52], [61, 62]]
 705   ```
 706 
 707   This method can be used to merge partitions created by `dynamic_partition`
 708   as illustrated on the following example:
 709 
 710   ```python
 711       # Apply function (increments x_i) on elements for which a certain condition
 712       # apply (x_i != -1 in this example).
 713       x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
 714       condition_mask=tf.not_equal(x,tf.constant(-1.))
 715       partitioned_data = tf.dynamic_partition(
 716           x, tf.cast(condition_mask, tf.int32) , 2)
 717       partitioned_data[1] = partitioned_data[1] + 1.0
 718       condition_indices = tf.dynamic_partition(
 719           tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
 720       x = tf.dynamic_stitch(condition_indices, partitioned_data)
 721       # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
 722       # unchanged.
 723   ```
 724 
 725   <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
 726   <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
 727   </div>
 728 
 729   Args:
 730     indices: A list of at least 1 `Tensor` objects with type `int32`.
 731     data: A list with the same length as `indices` of `Tensor` objects with the same type.
 732     name: A name for the operation (optional).
 733 
 734   Returns:
 735     A `Tensor`. Has the same type as `data`.
 736   """
 737   _ctx = _context._context
 738   if _ctx is None or not _ctx._eager_context.is_eager:
 739     if not isinstance(indices, (list, tuple)):
 740       raise TypeError(
 741           "Expected list for 'indices' argument to "
 742           "'dynamic_stitch' Op, not %r." % indices)
 743     _attr_N = len(indices)
 744     if not isinstance(data, (list, tuple)):
 745       raise TypeError(
 746           "Expected list for 'data' argument to "
 747           "'dynamic_stitch' Op, not %r." % data)
 748     if len(data) != _attr_N:
 749       raise ValueError(
 750           "List argument 'data' to 'dynamic_stitch' Op with length %d "
 751           "must match length %d of argument 'indices'." %
 752           (len(data), _attr_N))
 753     _, _, _op = _op_def_lib._apply_op_helper(
 754         "DynamicStitch", indices=indices, data=data, name=name)
 755     _result = _op.outputs[:]
 756     _inputs_flat = _op.inputs
 757     _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
 758     _execute.record_gradient(
 759       "DynamicStitch", _inputs_flat, _attrs, _result, name)
 760     _result, = _result
 761     return _result
 762 
 763   else:
 764     try:
 765       _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
 766         _ctx._context_handle, _ctx._eager_context.device_name,
 767         "DynamicStitch", name, _ctx._post_execution_callbacks, indices, data)
 768       return _result
 769     except _core._FallbackException:
 770       return dynamic_stitch_eager_fallback(
 771           indices, data, name=name, ctx=_ctx)
 772     except _core._NotOkStatusException as e:
 773       if name is not None:
 774         message = e.message + " name: " + name
 775       else:
 776         message = e.message
 777       _six.raise_from(_core._status_to_exception(e.code, message), None)
 778 
 779 
 780 def dynamic_stitch_eager_fallback(indices, data, name=None, ctx=None):
 781   r"""This is the slowpath function for Eager mode.
 782   This is for function dynamic_stitch
 783   """
 784   _ctx = ctx if ctx else _context.context()
 785   if not isinstance(indices, (list, tuple)):
 786     raise TypeError(
 787         "Expected list for 'indices' argument to "
 788         "'dynamic_stitch' Op, not %r." % indices)
 789   _attr_N = len(indices)
 790   if not isinstance(data, (list, tuple)):
 791     raise TypeError(
 792         "Expected list for 'data' argument to "
 793         "'dynamic_stitch' Op, not %r." % data)
 794   if len(data) != _attr_N:
 795     raise ValueError(
 796         "List argument 'data' to 'dynamic_stitch' Op with length %d "
 797         "must match length %d of argument 'indices'." %
 798         (len(data), _attr_N))
 799   _attr_T, data = _execute.args_to_matching_eager(list(data), _ctx)
 800   indices = _ops.convert_n_to_tensor(indices, _dtypes.int32)
 801   _inputs_flat = list(indices) + list(data)
 802   _attrs = ("N", _attr_N, "T", _attr_T)
 803   _result = _execute.execute(b"DynamicStitch", 1, inputs=_inputs_flat,
 804                              attrs=_attrs, ctx=_ctx, name=name)
 805   _execute.record_gradient(
 806       "DynamicStitch", _inputs_flat, _attrs, _result, name)
 807   _result, = _result
 808   return _result
 809 
 810 
 811 def fifo_queue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
 812   r"""A queue that produces elements in first-in first-out order.
 813 
 814   Args:
 815     component_types: A list of `tf.DTypes` that has length `>= 1`.
 816       The type of each component in a value.
 817     shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
 818       The shape of each component in a value. The length of this attr must
 819       be either 0 or the same as the length of component_types. If the length of
 820       this attr is 0, the shapes of queue elements are not constrained, and
 821       only one element may be dequeued at a time.
 822     capacity: An optional `int`. Defaults to `-1`.
 823       The upper bound on the number of elements in this queue.
 824       Negative numbers mean no limit.
 825     container: An optional `string`. Defaults to `""`.
 826       If non-empty, this queue is placed in the given container.
 827       Otherwise, a default container is used.
 828     shared_name: An optional `string`. Defaults to `""`.
 829       If non-empty, this queue will be shared under the given name
 830       across multiple sessions.
 831     name: A name for the operation (optional).
 832 
 833   Returns:
 834     A `Tensor` of type mutable `string`.
 835   """
 836   _ctx = _context._context
 837   if _ctx is None or not _ctx._eager_context.is_eager:
 838     if not isinstance(component_types, (list, tuple)):
 839       raise TypeError(
 840           "Expected list for 'component_types' argument to "
 841           "'fifo_queue' Op, not %r." % component_types)
 842     component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
 843     if shapes is None:
 844       shapes = []
 845     if not isinstance(shapes, (list, tuple)):
 846       raise TypeError(
 847           "Expected list for 'shapes' argument to "
 848           "'fifo_queue' Op, not %r." % shapes)
 849     shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
 850     if capacity is None:
 851       capacity = -1
 852     capacity = _execute.make_int(capacity, "capacity")
 853     if container is None:
 854       container = ""
 855     container = _execute.make_str(container, "container")
 856     if shared_name is None:
 857       shared_name = ""
 858     shared_name = _execute.make_str(shared_name, "shared_name")
 859     _, _, _op = _op_def_lib._apply_op_helper(
 860         "FIFOQueue", component_types=component_types, shapes=shapes,
 861         capacity=capacity, container=container, shared_name=shared_name,
 862         name=name)
 863     _result = _op.outputs[:]
 864     _inputs_flat = _op.inputs
 865     _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
 866               _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
 867               "container", _op.get_attr("container"), "shared_name",
 868               _op.get_attr("shared_name"))
 869     _execute.record_gradient(
 870       "FIFOQueue", _inputs_flat, _attrs, _result, name)
 871     _result, = _result
 872     return _result
 873 
 874   else:
 875     raise RuntimeError("fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
 876 
 877 
 878   raise RuntimeError("fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
 879 
 880 def fifo_queue_v2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
 881   r"""A queue that produces elements in first-in first-out order.
 882 
 883   Args:
 884     component_types: A list of `tf.DTypes` that has length `>= 1`.
 885       The type of each component in a value.
 886     shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
 887       The shape of each component in a value. The length of this attr must
 888       be either 0 or the same as the length of component_types. If the length of
 889       this attr is 0, the shapes of queue elements are not constrained, and
 890       only one element may be dequeued at a time.
 891     capacity: An optional `int`. Defaults to `-1`.
 892       The upper bound on the number of elements in this queue.
 893       Negative numbers mean no limit.
 894     container: An optional `string`. Defaults to `""`.
 895       If non-empty, this queue is placed in the given container.
 896       Otherwise, a default container is used.
 897     shared_name: An optional `string`. Defaults to `""`.
 898       If non-empty, this queue will be shared under the given name
 899       across multiple sessions.
 900     name: A name for the operation (optional).
 901 
 902   Returns:
 903     A `Tensor` of type `resource`.
 904   """
 905   _ctx = _context._context
 906   if _ctx is None or not _ctx._eager_context.is_eager:
 907     if not isinstance(component_types, (list, tuple)):
 908       raise TypeError(
 909           "Expected list for 'component_types' argument to "
 910           "'fifo_queue_v2' Op, not %r." % component_types)
 911     component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
 912     if shapes is None:
 913       shapes = []
 914     if not isinstance(shapes, (list, tuple)):
 915       raise TypeError(
 916           "Expected list for 'shapes' argument to "
 917           "'fifo_queue_v2' Op, not %r." % shapes)
 918     shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
 919     if capacity is None:
 920       capacity = -1
 921     capacity = _execute.make_int(capacity, "capacity")
 922     if container is None:
 923       container = ""
 924     container = _execute.make_str(container, "container")
 925     if shared_name is None:
 926       shared_name = ""
 927     shared_name = _execute.make_str(shared_name, "shared_name")
 928     _, _, _op = _op_def_lib._apply_op_helper(
 929         "FIFOQueueV2", component_types=component_types, shapes=shapes,
 930         capacity=capacity, container=container, shared_name=shared_name,
 931         name=name)
 932     _result = _op.outputs[:]
 933     _inputs_flat = _op.inputs
 934     _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
 935               _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
 936               "container", _op.get_attr("container"), "shared_name",
 937               _op.get_attr("shared_name"))
 938     _execute.record_gradient(
 939       "FIFOQueueV2", _inputs_flat, _attrs, _result, name)
 940     _result, = _result
 941     return _result
 942 
 943   else:
 944     try:
 945       _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
 946         _ctx._context_handle, _ctx._eager_context.device_name, "FIFOQueueV2",
 947         name, _ctx._post_execution_callbacks, "component_types",
 948         component_types, "shapes", shapes, "capacity", capacity, "container",
 949         container, "shared_name", shared_name)
 950       return _result
 951     except _core._FallbackException:
 952       return fifo_queue_v2_eager_fallback(
 953           component_types=component_types, shapes=shapes, capacity=capacity,
 954           container=container, shared_name=shared_name, name=name, ctx=_ctx)
 955     except _core._NotOkStatusException as e:
 956       if name is not None:
 957         message = e.message + " name: " + name
 958       else:
 959         message = e.message
 960       _six.raise_from(_core._status_to_exception(e.code, message), None)
 961 
 962 
 963 def fifo_queue_v2_eager_fallback(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
 964   r"""This is the slowpath function for Eager mode.
 965   This is for function fifo_queue_v2
 966   """
 967   _ctx = ctx if ctx else _context.context()
 968   if not isinstance(component_types, (list, tuple)):
 969     raise TypeError(
 970         "Expected list for 'component_types' argument to "
 971         "'fifo_queue_v2' Op, not %r." % component_types)
 972   component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
 973   if shapes is None:
 974     shapes = []
 975   if not isinstance(shapes, (list, tuple)):
 976     raise TypeError(
 977         "Expected list for 'shapes' argument to "
 978         "'fifo_queue_v2' Op, not %r." % shapes)
 979   shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
 980   if capacity is None:
 981     capacity = -1
 982   capacity = _execute.make_int(capacity, "capacity")
 983   if container is None:
 984     container = ""
 985   container = _execute.make_str(container, "container")
 986   if shared_name is None:
 987     shared_name = ""
 988   shared_name = _execute.make_str(shared_name, "shared_name")
 989   _inputs_flat = []
 990   _attrs = ("component_types", component_types, "shapes", shapes, "capacity",
 991   capacity, "container", container, "shared_name", shared_name)
 992   _result = _execute.execute(b"FIFOQueueV2", 1, inputs=_inputs_flat,
 993                              attrs=_attrs, ctx=_ctx, name=name)
 994   _execute.record_gradient(
 995       "FIFOQueueV2", _inputs_flat, _attrs, _result, name)
 996   _result, = _result
 997   return _result
 998 
 999 
def fake_queue(resource, name=None):
  r"""Deprecated. Do not use.

  Args:
    resource: A `Tensor` of type `resource`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.

  Raises:
    RuntimeError: If called with eager execution enabled; the op's output
      is a ref-typed handle, which eager mode cannot represent.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op directly; FakeQueue has no attrs.
    _, _, _op = _op_def_lib._apply_op_helper(
        "FakeQueue", resource=resource, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "FakeQueue", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    # NOTE: a duplicate, unreachable copy of this raise that followed the
    # if/else (a code-generation artifact) has been removed.
    raise RuntimeError("fake_queue op does not support eager execution. Arg 'handle' is a ref.")
1027 
def get_session_handle(value, name=None):
  r"""Store the input tensor in the state of the current session.

  Args:
    value: A `Tensor`. The tensor to be stored.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager: fast C++ path first, Python slow path on fallback.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name,
          "GetSessionHandle", name, ctx._post_execution_callbacks, value)
    except _core._FallbackException:
      return get_session_handle_eager_fallback(value, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode.
  _, _, op = _op_def_lib._apply_op_helper(
      "GetSessionHandle", value=value, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("GetSessionHandle", op.inputs, attrs, outputs,
                           name)
  handle, = outputs
  return handle
1065 
1066 
def get_session_handle_eager_fallback(value, name=None, ctx=None):
  r"""Slow-path eager implementation of get_session_handle.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  attr_t, (value,) = _execute.args_to_matching_eager([value], eager_ctx)
  flat_inputs = [value]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"GetSessionHandle", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("GetSessionHandle", flat_inputs, attrs, outputs,
                           name)
  handle, = outputs
  return handle
1081 
1082 
def get_session_handle_v2(value, name=None):
  r"""Store the input tensor in the state of the current session.

  Args:
    value: A `Tensor`. The tensor to be stored.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager: fast C++ path first, Python slow path on fallback.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name,
          "GetSessionHandleV2", name, ctx._post_execution_callbacks, value)
    except _core._FallbackException:
      return get_session_handle_v2_eager_fallback(value, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode.
  _, _, op = _op_def_lib._apply_op_helper(
      "GetSessionHandleV2", value=value, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("GetSessionHandleV2", op.inputs, attrs, outputs,
                           name)
  handle, = outputs
  return handle
1120 
1121 
def get_session_handle_v2_eager_fallback(value, name=None, ctx=None):
  r"""Slow-path eager implementation of get_session_handle_v2.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  attr_t, (value,) = _execute.args_to_matching_eager([value], eager_ctx)
  flat_inputs = [value]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"GetSessionHandleV2", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("GetSessionHandleV2", flat_inputs, attrs, outputs,
                           name)
  handle, = outputs
  return handle
1136 
1137 
def get_session_tensor(handle, dtype, name=None):
  r"""Get the value of the tensor specified by its handle.

  Args:
    handle: A `Tensor` of type `string`. The handle for a tensor stored in
      the session state.
    dtype: A `tf.DType`. The type of the output value.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager: fast C++ path first, Python slow path on fallback.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name,
          "GetSessionTensor", name, ctx._post_execution_callbacks, handle,
          "dtype", dtype)
    except _core._FallbackException:
      return get_session_tensor_eager_fallback(
          handle, dtype=dtype, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: canonicalize the dtype attr, then build the op.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, op = _op_def_lib._apply_op_helper(
      "GetSessionTensor", handle=handle, dtype=dtype, name=name)
  outputs = op.outputs[:]
  attrs = ("dtype", op.get_attr("dtype"))
  _execute.record_gradient("GetSessionTensor", op.inputs, attrs, outputs,
                           name)
  value, = outputs
  return value
1179 
1180 
def get_session_tensor_eager_fallback(handle, dtype, name=None, ctx=None):
  r"""Slow-path eager implementation of get_session_tensor.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  flat_inputs = [handle]
  attrs = ("dtype", dtype)
  outputs = _execute.execute(b"GetSessionTensor", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("GetSessionTensor", flat_inputs, attrs, outputs,
                           name)
  value, = outputs
  return value
1196 
1197 
def map_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes all elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate/canonicalize the attrs, then build the op.
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'map_clear' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MapClear", dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    # MapClear produces no outputs, so return the Operation itself.  (The
    # generated code previously had unreachable statements after this
    # return; they have been removed.)
    return _op
  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MapClear",
        name, _ctx._post_execution_callbacks, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return map_clear_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1257 
1258 
def map_clear_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of map_clear.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_clear' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # The op takes no tensor inputs and produces no outputs.
  attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
           dtypes, "container", container, "shared_name", shared_name)
  _execute.execute(b"MapClear", 0, inputs=[], attrs=attrs, ctx=eager_ctx,
                   name=name)
  return None
1288 
1289 
def map_incomplete_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of incomplete elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager: fast C++ path first, Python slow path on fallback.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name,
          "MapIncompleteSize", name, ctx._post_execution_callbacks,
          "capacity", capacity, "memory_limit", memory_limit, "dtypes",
          dtypes, "container", container, "shared_name", shared_name)
    except _core._FallbackException:
      return map_incomplete_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate/canonicalize the attrs, then build the op.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_incomplete_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  _, _, op = _op_def_lib._apply_op_helper(
      "MapIncompleteSize", dtypes=dtypes, capacity=capacity,
      memory_limit=memory_limit, container=container,
      shared_name=shared_name, name=name)
  outputs = op.outputs[:]
  attrs = ("capacity", op.get_attr("capacity"), "memory_limit",
           op.get_attr("memory_limit"), "dtypes", op.get_attr("dtypes"),
           "container", op.get_attr("container"), "shared_name",
           op.get_attr("shared_name"))
  _execute.record_gradient("MapIncompleteSize", op.inputs, attrs, outputs,
                           name)
  size, = outputs
  return size
1356 
1357 
def map_incomplete_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of map_incomplete_size.

  Invoked when the fast C++ execution path raises _FallbackException.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_incomplete_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # The op takes no tensor inputs; everything is carried in the attrs.
  attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
           dtypes, "container", container, "shared_name", shared_name)
  outputs = _execute.execute(b"MapIncompleteSize", 1, inputs=[], attrs=attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("MapIncompleteSize", [], attrs, outputs, name)
  size, = outputs
  return size
1389 
1390 
def map_peek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op peeks at the values at the specified key.  If the

  underlying container does not contain this key
  this op will block until it does.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager: fast C++ path first, Python slow path on fallback.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "MapPeek",
          name, ctx._post_execution_callbacks, key, indices, "capacity",
          capacity, "memory_limit", memory_limit, "dtypes", dtypes,
          "container", container, "shared_name", shared_name)
    except _core._FallbackException:
      return map_peek_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate/canonicalize the attrs, then build the op.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  _, _, op = _op_def_lib._apply_op_helper(
      "MapPeek", key=key, indices=indices, dtypes=dtypes, capacity=capacity,
      memory_limit=memory_limit, container=container,
      shared_name=shared_name, name=name)
  outputs = op.outputs[:]
  if not outputs:
    # No outputs were produced; return the Operation itself.
    return op
  attrs = ("capacity", op.get_attr("capacity"), "memory_limit",
           op.get_attr("memory_limit"), "dtypes", op.get_attr("dtypes"),
           "container", op.get_attr("container"), "shared_name",
           op.get_attr("shared_name"))
  _execute.record_gradient("MapPeek", op.inputs, attrs, outputs, name)
  return outputs
1464 
1465 
def map_peek_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of map_peek.

  Invoked when the C fast path raises a FallbackException.
  """
  _ctx = _context.context() if not ctx else ctx
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  # Normalize optional attrs to their declared defaults before coercion.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # Inputs have fixed dtypes per the op definition.
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
            dtypes, "container", container, "shared_name", shared_name)
  _result = _execute.execute(b"MapPeek", len(dtypes), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("MapPeek", _inputs_flat, _attrs, _result, name)
  return _result
1498 
1499 
def map_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode: validate and canonicalize attrs, then build a MapSize node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'map_size' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MapSize", dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
              _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "MapSize", _inputs_flat, _attrs, _result, name)
    # MapSize has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back on failure.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MapSize",
        name, _ctx._post_execution_callbacks, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return map_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1566 
1567 
def map_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of map_size.

  Invoked when the C fast path raises a FallbackException.
  """
  _ctx = _context.context() if not ctx else ctx
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  # Normalize optional attrs to their declared defaults before coercion.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # MapSize takes no tensor inputs and produces a single int32 output.
  _inputs_flat = []
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
            dtypes, "container", container, "shared_name", shared_name)
  _result = _execute.execute(b"MapSize", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("MapSize", _inputs_flat, _attrs, _result, name)
  _size, = _result
  return _size
1599 
1600 
def map_stage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Stage (key, values) in the underlying container which behaves like a hashtable.

  Args:
    key: A `Tensor` of type `int64`. int64
    indices: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects. A list of tensors to stage.
    dtypes: A list of `tf.DTypes`.
      A list of data types that inserted values should adhere to.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
      Maximum number of elements in the Staging Area. If > 0, inserts
      on the container will block when the capacity is reached.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container. Otherwise,
      a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      It is necessary to match this name to the matching Unstage Op.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  # Graph mode: validate and canonicalize attrs, then build a MapStage node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'map_stage' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MapStage", key=key, indices=indices, values=values, dtypes=dtypes,
        capacity=capacity, memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    # MapStage has no outputs, so the Operation itself is returned.
    # (Removed unreachable `_result = None; return _result` dead code that
    # followed this return.)
    return _op

  else:
    # Eager mode: try the C fast path first; fall back on failure.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MapStage",
        name, _ctx._post_execution_callbacks, key, indices, values,
        "capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
        "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return map_stage_eager_fallback(
          key, indices, values, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1670 
1671 
def map_stage_eager_fallback(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of map_stage.

  Invoked when the C fast path raises a FallbackException.
  """
  _ctx = _context.context() if not ctx else ctx
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_stage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  # Normalize optional attrs to their declared defaults before coercion.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # `values` may be heterogeneous; derive the fake_dtypes attr from the
  # actual tensors passed in.
  _attr_fake_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices] + list(values)
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
            dtypes, "fake_dtypes", _attr_fake_dtypes, "container", container,
            "shared_name", shared_name)
  # MapStage has no outputs.
  _execute.execute(b"MapStage", 0, inputs=_inputs_flat, attrs=_attrs,
                   ctx=_ctx, name=name)
  return None
1705 
1706 
def map_unstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns the values associated with the key

  from the underlying container.   If the underlying container
  does not contain this key, the op will block until it does.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context
  # Graph mode: validate and canonicalize attrs, then build a MapUnstage node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'map_unstage' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MapUnstage", key=key, indices=indices, dtypes=dtypes,
        capacity=capacity, memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    # An op with no outputs returns the Operation itself (not reachable here
    # in practice since `dtypes` must have length >= 1).
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
              _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "MapUnstage", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    # Eager mode: try the C fast path first; fall back on failure.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MapUnstage",
        name, _ctx._post_execution_callbacks, key, indices, "capacity",
        capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return map_unstage_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1780 
1781 
def map_unstage_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of map_unstage.

  Invoked when the C fast path raises a FallbackException.
  """
  _ctx = _context.context() if not ctx else ctx
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  # Normalize optional attrs to their declared defaults before coercion.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # Inputs have fixed dtypes per the op definition.
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
            dtypes, "container", container, "shared_name", shared_name)
  _result = _execute.execute(b"MapUnstage", len(dtypes), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("MapUnstage", _inputs_flat, _attrs, _result, name)
  return _result
1814 
1815 
# Named output structure for MapUnstageNoKey: a (key, values) pair, where
# `values` is the list of unstaged tensors.
_map_unstage_no_key_outputs = ["key", "values"]
_MapUnstageNoKeyOutput = _collections.namedtuple(
    "MapUnstageNoKey", _map_unstage_no_key_outputs)
1819 
1820 
def map_unstage_no_key(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns a random (key, value)

  from the underlying container.   If the underlying container
  does not contain elements, the op will block until it does.

  Args:
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (key, values).

    key: A `Tensor` of type `int64`.
    values: A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context
  # Graph mode: validate and canonicalize attrs, then build the op node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'map_unstage_no_key' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MapUnstageNoKey", indices=indices, dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
              _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "MapUnstageNoKey", _inputs_flat, _attrs, _result, name)
    # Repackage the flat output list as (key, [values...]) for the namedtuple.
    _result = _result[:1] + [_result[1:]]
    _result = _MapUnstageNoKeyOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; fall back on failure.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MapUnstageNoKey", name, _ctx._post_execution_callbacks, indices,
        "capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
        "container", container, "shared_name", shared_name)
      _result = _MapUnstageNoKeyOutput._make(_result)
      return _result
    except _core._FallbackException:
      return map_unstage_no_key_eager_fallback(
          indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1897 
1898 
def map_unstage_no_key_eager_fallback(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of map_unstage_no_key.

  Invoked when the C fast path raises a FallbackException.
  """
  _ctx = _context.context() if not ctx else ctx
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_unstage_no_key' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  # Normalize optional attrs to their declared defaults before coercion.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
            dtypes, "container", container, "shared_name", shared_name)
  # One output per dtype, plus the key tensor.
  _result = _execute.execute(b"MapUnstageNoKey", len(dtypes) + 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("MapUnstageNoKey", _inputs_flat, _attrs, _result,
                           name)
  # Repackage the flat output list as (key, [values...]) for the namedtuple.
  return _MapUnstageNoKeyOutput._make([_result[0], _result[1:]])
1933 
1934 
def ordered_map_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes all elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  # Graph mode: validate and canonicalize attrs, then build the op node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'ordered_map_clear' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "OrderedMapClear", dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    # OrderedMapClear has no outputs, so the Operation itself is returned.
    # (Removed unreachable `_result = None; return _result` dead code that
    # followed this return.)
    return _op

  else:
    # Eager mode: try the C fast path first; fall back on failure.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "OrderedMapClear", name, _ctx._post_execution_callbacks, "capacity",
        capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return ordered_map_clear_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1994 
1995 
def ordered_map_clear_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of ordered_map_clear.

  Invoked when the C fast path raises a FallbackException.
  """
  _ctx = _context.context() if not ctx else ctx
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_clear' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  # Normalize optional attrs to their declared defaults before coercion.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # OrderedMapClear takes no tensor inputs and produces no outputs.
  _inputs_flat = []
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
            dtypes, "container", container, "shared_name", shared_name)
  _execute.execute(b"OrderedMapClear", 0, inputs=_inputs_flat, attrs=_attrs,
                   ctx=_ctx, name=name)
  return None
2025 
2026 
def ordered_map_incomplete_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of incomplete elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode: validate and canonicalize attrs, then build the op node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'ordered_map_incomplete_size' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "OrderedMapIncompleteSize", dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
              _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "OrderedMapIncompleteSize", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the output list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back on failure.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "OrderedMapIncompleteSize", name, _ctx._post_execution_callbacks,
        "capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
        "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return ordered_map_incomplete_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2093 
2094 
def ordered_map_incomplete_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of ordered_map_incomplete_size.

  Invoked when the C fast path raises a FallbackException.
  """
  _ctx = _context.context() if not ctx else ctx
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_incomplete_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  # Normalize optional attrs to their declared defaults before coercion.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # No tensor inputs; a single int32 output holding the incomplete count.
  _inputs_flat = []
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
            dtypes, "container", container, "shared_name", shared_name)
  _result = _execute.execute(b"OrderedMapIncompleteSize", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("OrderedMapIncompleteSize", _inputs_flat, _attrs,
                           _result, name)
  _size, = _result
  return _size
2127 
2128 
def ordered_map_peek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op peeks at the values at the specified key.  If the

  underlying container does not contain this key
  this op will block until it does.   This Op is optimized for
  performance.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: try the fast C execution path, falling back to the
    # Python slow path when it is unavailable.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "OrderedMapPeek", name, _ctx._post_execution_callbacks, key, indices,
        "capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
        "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return ordered_map_peek_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attrs and add the op to the default graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  _, _, _op = _op_def_lib._apply_op_helper(
      "OrderedMapPeek", key=key, indices=indices, dtypes=dtypes,
      capacity=capacity, memory_limit=memory_limit, container=container,
      shared_name=shared_name, name=name)
  _result = _op.outputs[:]
  if not _result:
    return _op
  _inputs_flat = _op.inputs
  _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
            _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
            "container", _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"))
  _execute.record_gradient(
      "OrderedMapPeek", _inputs_flat, _attrs, _result, name)
  return _result
2203 
2204 
def ordered_map_peek_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ordered_map_peek
  """
  # Slow path: normalize attrs, convert inputs, then call _execute.execute.
  _ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit,
            "dtypes", dtypes, "container", container,
            "shared_name", shared_name)
  # One output tensor per requested dtype.
  _result = _execute.execute(b"OrderedMapPeek", len(dtypes),
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "OrderedMapPeek", _inputs_flat, _attrs, _result, name)
  return _result
2238 
2239 
def ordered_map_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: try the fast C execution path, falling back to the
    # Python slow path when it is unavailable.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "OrderedMapSize", name, _ctx._post_execution_callbacks, "capacity",
        capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return ordered_map_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attrs and add the op to the default graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  _, _, _op = _op_def_lib._apply_op_helper(
      "OrderedMapSize", dtypes=dtypes, capacity=capacity,
      memory_limit=memory_limit, container=container,
      shared_name=shared_name, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
            _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
            "container", _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"))
  _execute.record_gradient(
      "OrderedMapSize", _inputs_flat, _attrs, _result, name)
  # Single int32 output: the element count.
  (_size,) = _result
  return _size
2306 
2307 
def ordered_map_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ordered_map_size
  """
  # Slow path: normalize every attr, then run the op via _execute.execute.
  _ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  _inputs_flat = []
  _attrs = ("capacity", capacity, "memory_limit", memory_limit,
            "dtypes", dtypes, "container", container,
            "shared_name", shared_name)
  _result = _execute.execute(b"OrderedMapSize", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "OrderedMapSize", _inputs_flat, _attrs, _result, name)
  # Single int32 output: the element count.
  (_size,) = _result
  return _size
2339 
2340 
def ordered_map_stage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Stage (key, values) in the underlying container which behaves like a ordered

  associative container.   Elements are ordered by key.

  Args:
    key: A `Tensor` of type `int64`. int64
    indices: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects. A list of tensors to stage.
    dtypes: A list of `tf.DTypes`.
      A list of data types that inserted values should adhere to.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
      Maximum number of elements in the Staging Area. If > 0, inserts
      on the container will block when the capacity is reached.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container. Otherwise,
      a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      It is necessary to match this name to the matching Unstage Op.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'ordered_map_stage' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "OrderedMapStage", key=key, indices=indices, values=values,
        dtypes=dtypes, capacity=capacity, memory_limit=memory_limit,
        container=container, shared_name=shared_name, name=name)
    # OrderedMapStage has no outputs; return the Operation itself.
    # (Removed unreachable dead code that followed this return.)
    return _op

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "OrderedMapStage", name, _ctx._post_execution_callbacks, key, indices,
        values, "capacity", capacity, "memory_limit", memory_limit, "dtypes",
        dtypes, "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return ordered_map_stage_eager_fallback(
          key, indices, values, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2412 
2413 
def ordered_map_stage_eager_fallback(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ordered_map_stage
  """
  # Slow path: normalize attrs, convert inputs, then call _execute.execute.
  _ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_stage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  # The actual dtypes of the staged values are carried in "fake_dtypes".
  _attr_fake_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices] + list(values)
  _attrs = ("capacity", capacity, "memory_limit", memory_limit,
            "dtypes", dtypes, "fake_dtypes", _attr_fake_dtypes,
            "container", container, "shared_name", shared_name)
  # Zero outputs: this op only has the staging side effect.
  _execute.execute(b"OrderedMapStage", 0, inputs=_inputs_flat, attrs=_attrs,
                   ctx=_ctx, name=name)
  return None
2447 
2448 
def ordered_map_unstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns the values associated with the key

  from the underlying container.   If the underlying container
  does not contain this key, the op will block until it does.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: try the fast C execution path, falling back to the
    # Python slow path when it is unavailable.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "OrderedMapUnstage", name, _ctx._post_execution_callbacks, key,
        indices, "capacity", capacity, "memory_limit", memory_limit, "dtypes",
        dtypes, "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return ordered_map_unstage_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attrs and add the op to the default graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  _, _, _op = _op_def_lib._apply_op_helper(
      "OrderedMapUnstage", key=key, indices=indices, dtypes=dtypes,
      capacity=capacity, memory_limit=memory_limit, container=container,
      shared_name=shared_name, name=name)
  _result = _op.outputs[:]
  if not _result:
    return _op
  _inputs_flat = _op.inputs
  _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
            _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
            "container", _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"))
  _execute.record_gradient(
      "OrderedMapUnstage", _inputs_flat, _attrs, _result, name)
  return _result
2522 
2523 
def ordered_map_unstage_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ordered_map_unstage
  """
  # Slow path: normalize attrs, convert inputs, then call _execute.execute.
  _ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit,
            "dtypes", dtypes, "container", container,
            "shared_name", shared_name)
  # One output tensor per requested dtype.
  _result = _execute.execute(b"OrderedMapUnstage", len(dtypes),
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "OrderedMapUnstage", _inputs_flat, _attrs, _result, name)
  return _result
2557 
2558 
# Output container for ordered_map_unstage_no_key: a namedtuple with
# fields (key, values), matching the op's two logical outputs.
_ordered_map_unstage_no_key_outputs = ["key", "values"]
_OrderedMapUnstageNoKeyOutput = _collections.namedtuple(
    "OrderedMapUnstageNoKey", _ordered_map_unstage_no_key_outputs)
2562 
2563 
def ordered_map_unstage_no_key(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns the (key, value) element with the smallest

  key from the underlying container.   If the underlying container
  does not contain elements, the op will block until it does.

  Args:
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (key, values).

    key: A `Tensor` of type `int64`.
    values: A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: try the fast C execution path, falling back to the
    # Python slow path when it is unavailable.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "OrderedMapUnstageNoKey", name, _ctx._post_execution_callbacks,
        indices, "capacity", capacity, "memory_limit", memory_limit, "dtypes",
        dtypes, "container", container, "shared_name", shared_name)
      return _OrderedMapUnstageNoKeyOutput._make(_result)
    except _core._FallbackException:
      return ordered_map_unstage_no_key_eager_fallback(
          indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      message = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attrs and add the op to the default graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage_no_key' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  _, _, _op = _op_def_lib._apply_op_helper(
      "OrderedMapUnstageNoKey", indices=indices, dtypes=dtypes,
      capacity=capacity, memory_limit=memory_limit, container=container,
      shared_name=shared_name, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
            _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
            "container", _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"))
  _execute.record_gradient(
      "OrderedMapUnstageNoKey", _inputs_flat, _attrs, _result, name)
  # Repack the flat output list as (key, [values...]).
  _result = _result[:1] + [_result[1:]]
  return _OrderedMapUnstageNoKeyOutput._make(_result)
2640 
2641 
def ordered_map_unstage_no_key_eager_fallback(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ordered_map_unstage_no_key
  """
  # Slow path: normalize attrs, convert inputs, then call _execute.execute.
  _ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage_no_key' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_dt, "dtypes") for _dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit,
            "dtypes", dtypes, "container", container,
            "shared_name", shared_name)
  # len(dtypes) value tensors plus the key tensor.
  _result = _execute.execute(b"OrderedMapUnstageNoKey", len(dtypes) + 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "OrderedMapUnstageNoKey", _inputs_flat, _attrs, _result, name)
  # Repack the flat output list as (key, [values...]).
  _result = _result[:1] + [_result[1:]]
  return _OrderedMapUnstageNoKeyOutput._make(_result)
2676 
2677 
def padding_fifo_queue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements in first-in first-out order.

  Variable-size shapes are allowed by setting the corresponding shape dimensions
  to 0 in the shape attr.  In this case DequeueMany will pad up to the maximum
  size of any given element in the minibatch.  See below for details.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types.
      Shapes of fixed rank but variable size are allowed by setting
      any shape dimension to -1.  In this case, the inputs' shape may vary along
      the given dimension, and DequeueMany will pad the given dimension with
      zeros up to the maximum shape of all elements in the given batch.
      If the length of this attr is 0, different queue elements may have
      different ranks and shapes, but only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.

  Raises:
    RuntimeError: If executed eagerly; the op's ref-typed output is
      unsupported in eager mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'padding_fifo_queue' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if shapes is None:
      shapes = []
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'padding_fifo_queue' Op, not %r." % shapes)
    shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
    if capacity is None:
      capacity = -1
    capacity = _execute.make_int(capacity, "capacity")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PaddingFIFOQueue", component_types=component_types, shapes=shapes,
        capacity=capacity, container=container, shared_name=shared_name,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "PaddingFIFOQueue", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-typed outputs cannot be represented as eager tensors.
    # (Removed a duplicate, unreachable raise that followed this if/else.)
    raise RuntimeError("padding_fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
2754 
def padding_fifo_queue_v2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements in first-in first-out order.

  Variable-size shapes are allowed by setting the corresponding shape dimensions
  to 0 in the shape attr.  In this case DequeueMany will pad up to the maximum
  size of any given element in the minibatch.  See below for details.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types.
      Shapes of fixed rank but variable size are allowed by setting
      any shape dimension to -1.  In this case, the inputs' shape may vary along
      the given dimension, and DequeueMany will pad the given dimension with
      zeros up to the maximum shape of all elements in the given batch.
      If the length of this attr is 0, different queue elements may have
      different ranks and shapes, but only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): validate and
  # canonicalize the attrs, then build a PaddingFIFOQueueV2 node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'padding_fifo_queue_v2' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    # NOTE(review): the mutable `[]` default is harmless here because `shapes`
    # is only ever rebound, never mutated in place.
    if shapes is None:
      shapes = []
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'padding_fifo_queue_v2' Op, not %r." % shapes)
    shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
    if capacity is None:
      capacity = -1
    capacity = _execute.make_int(capacity, "capacity")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PaddingFIFOQueueV2", component_types=component_types, shapes=shapes,
        capacity=capacity, container=container, shared_name=shared_name,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the attrs from the created op so the recorded values reflect any
    # canonicalization performed by _apply_op_helper.
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "PaddingFIFOQueueV2", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element output list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C++ fast path first; fall back to the Python
    # slow path when the fast path signals it cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PaddingFIFOQueueV2", name, _ctx._post_execution_callbacks,
        "component_types", component_types, "shapes", shapes, "capacity",
        capacity, "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return padding_fifo_queue_v2_eager_fallback(
          component_types=component_types, shapes=shapes, capacity=capacity,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Augment the error message with the op name (if any) and re-raise as
      # the matching TensorFlow error type, suppressing exception chaining.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2844 
2845 
def padding_fifo_queue_v2_eager_fallback(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager executor for padding_fifo_queue_v2.

  Canonicalizes the attrs and dispatches the PaddingFIFOQueueV2 op through
  the generic eager execute path, returning the queue's resource handle.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'padding_fifo_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(ct, "component_types")
                     for ct in component_types]
  if shapes is None:
    shapes = []
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'padding_fifo_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(shape, "shapes") for shape in shapes]
  capacity = _execute.make_int(-1 if capacity is None else capacity,
                               "capacity")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # The op takes no tensor inputs; the queue is fully described by its attrs.
  flat_inputs = []
  attrs = ("component_types", component_types, "shapes", shapes,
           "capacity", capacity, "container", container,
           "shared_name", shared_name)
  outputs = _execute.execute(b"PaddingFIFOQueueV2", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "PaddingFIFOQueueV2", flat_inputs, attrs, outputs, name)
  handle, = outputs
  return handle
2881 
2882 
def parallel_dynamic_stitch(indices, data, name=None):
  r"""Interleave the values from the `data` tensors into a single tensor.

  Builds a merged tensor such that

  ```python
      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
  ```

  For example, if each `indices[m]` is scalar or vector, we have

  ```python
      # Scalar indices:
      merged[indices[m], ...] = data[m][...]

      # Vector indices:
      merged[indices[m][i], ...] = data[m][i, ...]
  ```

  Each `data[i].shape` must start with the corresponding `indices[i].shape`,
  and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
  must have `data[i].shape = indices[i].shape + constant`.  In terms of this
  `constant`, the output shape is

      merged.shape = [max(indices)] + constant

  Values may be merged in parallel, so if an index appears in both `indices[m][i]`
  and `indices[n][j]`, the result may be invalid. This differs from the normal
  DynamicStitch operator that defines the behavior in that case.

  For example:

  ```python
      indices[0] = 6
      indices[1] = [4, 1]
      indices[2] = [[5, 2], [0, 3]]
      data[0] = [61, 62]
      data[1] = [[41, 42], [11, 12]]
      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
                [51, 52], [61, 62]]
  ```

  This method can be used to merge partitions created by `dynamic_partition`
  as illustrated on the following example:

  ```python
      # Apply function (increments x_i) on elements for which a certain condition
      # apply (x_i != -1 in this example).
      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
      condition_mask=tf.not_equal(x,tf.constant(-1.))
      partitioned_data = tf.dynamic_partition(
          x, tf.cast(condition_mask, tf.int32) , 2)
      partitioned_data[1] = partitioned_data[1] + 1.0
      condition_indices = tf.dynamic_partition(
          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
      x = tf.dynamic_stitch(condition_indices, partitioned_data)
      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
      # unchanged.
  ```

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
  </div>

  Args:
    indices: A list of at least 1 `Tensor` objects with type `int32`.
    data: A list with the same length as `indices` of `Tensor` objects with the same type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): validate the
  # list arguments and build a ParallelDynamicStitch node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(indices, (list, tuple)):
      raise TypeError(
          "Expected list for 'indices' argument to "
          "'parallel_dynamic_stitch' Op, not %r." % indices)
    # The 'N' attr is implied by the number of index tensors.
    _attr_N = len(indices)
    if not isinstance(data, (list, tuple)):
      raise TypeError(
          "Expected list for 'data' argument to "
          "'parallel_dynamic_stitch' Op, not %r." % data)
    if len(data) != _attr_N:
      raise ValueError(
          "List argument 'data' to 'parallel_dynamic_stitch' Op with length %d "
          "must match length %d of argument 'indices'." %
          (len(data), _attr_N))
    _, _, _op = _op_def_lib._apply_op_helper(
        "ParallelDynamicStitch", indices=indices, data=data, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "ParallelDynamicStitch", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element output list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C++ fast path first; fall back to the Python
    # slow path when the fast path signals it cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ParallelDynamicStitch", name, _ctx._post_execution_callbacks,
        indices, data)
      return _result
    except _core._FallbackException:
      return parallel_dynamic_stitch_eager_fallback(
          indices, data, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Augment the error message with the op name (if any) and re-raise as
      # the matching TensorFlow error type, suppressing exception chaining.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2998 
2999 
def parallel_dynamic_stitch_eager_fallback(indices, data, name=None, ctx=None):
  r"""Slow-path eager executor for parallel_dynamic_stitch.

  Validates the list arguments, infers the N/T attrs, and dispatches the
  ParallelDynamicStitch op through the generic eager execute path.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'parallel_dynamic_stitch' Op, not %r." % indices)
  num_partitions = len(indices)
  if not isinstance(data, (list, tuple)):
    raise TypeError(
        "Expected list for 'data' argument to "
        "'parallel_dynamic_stitch' Op, not %r." % data)
  if len(data) != num_partitions:
    raise ValueError(
        "List argument 'data' to 'parallel_dynamic_stitch' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(data), num_partitions))
  # Infer the common dtype T from the data tensors, then coerce the index
  # tensors to int32 as required by the op definition.
  dtype, data = _execute.args_to_matching_eager(list(data), eager_ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int32)
  flat_inputs = list(indices) + list(data)
  attrs = ("N", num_partitions, "T", dtype)
  outputs = _execute.execute(b"ParallelDynamicStitch", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "ParallelDynamicStitch", flat_inputs, attrs, outputs, name)
  merged, = outputs
  return merged
3029 
3030 
def priority_queue(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements sorted by the first component value.

  Note that the PriorityQueue requires the first component of any element
  to be a scalar int64, in addition to the other elements declared by
  component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
  and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
  entry in their input (resp. output) lists.

  Args:
    shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    component_types: An optional list of `tf.DTypes`. Defaults to `[]`.
      The type of each component in a value.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.

  Raises:
    RuntimeError: If executing eagerly; the ref-typed handle output cannot
      be represented in eager mode (use `priority_queue_v2` instead).
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): validate and
  # canonicalize the attrs, then build a PriorityQueue node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'priority_queue' Op, not %r." % shapes)
    shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
    if component_types is None:
      component_types = []
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'priority_queue' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if capacity is None:
      capacity = -1
    capacity = _execute.make_int(capacity, "capacity")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PriorityQueue", shapes=shapes, component_types=component_types,
        capacity=capacity, container=container, shared_name=shared_name,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the attrs from the created op so the recorded values reflect any
    # canonicalization performed by _apply_op_helper.
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "PriorityQueue", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element output list.
    _result, = _result
    return _result

  else:
    # Fixed: removed the unreachable duplicate `raise` that trailed this
    # if/else (both branches return or raise, so it was dead code).
    raise RuntimeError("priority_queue op does not support eager execution. Arg 'handle' is a ref.")
3105 
def priority_queue_v2(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements sorted by the first component value.

  Note that the PriorityQueue requires the first component of any element
  to be a scalar int64, in addition to the other elements declared by
  component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
  and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
  entry in their input (resp. output) lists.

  Args:
    shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    component_types: An optional list of `tf.DTypes`. Defaults to `[]`.
      The type of each component in a value.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): validate and
  # canonicalize the attrs, then build a PriorityQueueV2 node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'priority_queue_v2' Op, not %r." % shapes)
    shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
    # NOTE(review): the mutable `[]` default is harmless here because
    # `component_types` is only ever rebound, never mutated in place.
    if component_types is None:
      component_types = []
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'priority_queue_v2' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if capacity is None:
      capacity = -1
    capacity = _execute.make_int(capacity, "capacity")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PriorityQueueV2", shapes=shapes, component_types=component_types,
        capacity=capacity, container=container, shared_name=shared_name,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the attrs from the created op so the recorded values reflect any
    # canonicalization performed by _apply_op_helper.
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "PriorityQueueV2", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element output list.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C++ fast path first; fall back to the Python
    # slow path when the fast path signals it cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PriorityQueueV2", name, _ctx._post_execution_callbacks,
        "component_types", component_types, "shapes", shapes, "capacity",
        capacity, "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return priority_queue_v2_eager_fallback(
          component_types=component_types, shapes=shapes, capacity=capacity,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Augment the error message with the op name (if any) and re-raise as
      # the matching TensorFlow error type, suppressing exception chaining.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3193 
3194 
def priority_queue_v2_eager_fallback(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager executor for priority_queue_v2.

  Canonicalizes the attrs and dispatches the PriorityQueueV2 op through the
  generic eager execute path, returning the queue's resource handle.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'priority_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(shape, "shapes") for shape in shapes]
  if component_types is None:
    component_types = []
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'priority_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(ct, "component_types")
                     for ct in component_types]
  capacity = _execute.make_int(-1 if capacity is None else capacity,
                               "capacity")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # The op takes no tensor inputs; the queue is fully described by its attrs.
  flat_inputs = []
  attrs = ("component_types", component_types, "shapes", shapes,
           "capacity", capacity, "container", container,
           "shared_name", shared_name)
  outputs = _execute.execute(b"PriorityQueueV2", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "PriorityQueueV2", flat_inputs, attrs, outputs, name)
  handle, = outputs
  return handle
3230 
3231 
def queue_close(handle, cancel_pending_enqueues=False, name=None):
  r"""Closes the given queue.

  This operation signals that no more elements will be enqueued in the
  given queue. Subsequent Enqueue(Many) operations will fail.
  Subsequent Dequeue(Many) operations will continue to succeed if
  sufficient elements remain in the queue. Subsequent Dequeue(Many)
  operations that would block will fail immediately.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
      If true, all pending enqueue requests that are
      blocked on the given queue will be canceled.
    name: A name for the operation (optional).

  Returns:
    The created Operation.

  Raises:
    RuntimeError: If executing eagerly; the ref-typed handle input cannot
      be represented in eager mode (use `queue_close_v2` instead).
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): build a
  # QueueClose node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if cancel_pending_enqueues is None:
      cancel_pending_enqueues = False
    cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueClose", handle=handle,
        cancel_pending_enqueues=cancel_pending_enqueues, name=name)
    # QueueClose has no outputs; the created Operation is the result.
    # (Fixed: removed the dead `_result = None; return _result` that
    # followed this return.)
    return _op

  else:
    # Fixed: removed the unreachable duplicate `raise` that trailed this
    # if/else (both branches return or raise, so it was dead code).
    raise RuntimeError("queue_close op does not support eager execution. Arg 'handle' is a ref.")
3268 
def queue_close_v2(handle, cancel_pending_enqueues=False, name=None):
  r"""Closes the given queue.

  This operation signals that no more elements will be enqueued in the
  given queue. Subsequent Enqueue(Many) operations will fail.
  Subsequent Dequeue(Many) operations will continue to succeed if
  sufficient elements remain in the queue. Subsequent Dequeue(Many)
  operations that would block will fail immediately.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
      If true, all pending enqueue requests that are
      blocked on the given queue will be canceled.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): build a
  # QueueCloseV2 node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if cancel_pending_enqueues is None:
      cancel_pending_enqueues = False
    cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueCloseV2", handle=handle,
        cancel_pending_enqueues=cancel_pending_enqueues, name=name)
    # QueueCloseV2 has no outputs; the created Operation is the result.
    # (Fixed: removed the dead `_result = None; return _result` that
    # followed this return.)
    return _op

  else:
    # Eager mode: attempt the C++ fast path first; fall back to the Python
    # slow path when the fast path signals it cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "QueueCloseV2",
        name, _ctx._post_execution_callbacks, handle,
        "cancel_pending_enqueues", cancel_pending_enqueues)
      return _result
    except _core._FallbackException:
      return queue_close_v2_eager_fallback(
          handle, cancel_pending_enqueues=cancel_pending_enqueues, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Augment the error message with the op name (if any) and re-raise as
      # the matching TensorFlow error type, suppressing exception chaining.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3317 
3318 
def queue_close_v2_eager_fallback(handle, cancel_pending_enqueues=False, name=None, ctx=None):
  r"""Slow-path eager executor for queue_close_v2.

  Runs the QueueCloseV2 op through the generic eager execute path. The op
  has no outputs, so this always returns None.
  """
  eager_ctx = ctx if ctx else _context.context()
  if cancel_pending_enqueues is None:
    cancel_pending_enqueues = False
  cancel_pending_enqueues = _execute.make_bool(
      cancel_pending_enqueues, "cancel_pending_enqueues")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flat_inputs = [handle]
  attrs = ("cancel_pending_enqueues", cancel_pending_enqueues)
  _execute.execute(b"QueueCloseV2", 0, inputs=flat_inputs, attrs=attrs,
                   ctx=eager_ctx, name=name)
  return None
3334 
3335 
def queue_dequeue(handle, component_types, timeout_ms=-1, name=None):
  r"""Dequeues a tuple of one or more tensors from the given queue.

  This operation has k outputs, where k is the number of components
  in the tuples stored in the given queue, and output i is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until an element
  has been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is empty, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.

  Raises:
    RuntimeError: If executing eagerly; the ref-typed handle input cannot
      be represented in eager mode (use the V2 resource-based op instead).
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): validate and
  # canonicalize the attrs, then build a QueueDequeue node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'queue_dequeue' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueDequeue", handle=handle, component_types=component_types,
        timeout_ms=timeout_ms, name=name)
    # One output per entry in component_types; return the full list
    # (no single-element unpacking, unlike single-output ops in this file).
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op.get_attr("timeout_ms"))
    _execute.record_gradient(
      "QueueDequeue", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    # Fixed: removed the unreachable duplicate `raise` that trailed this
    # if/else (both branches return or raise, so it was dead code).
    raise RuntimeError("queue_dequeue op does not support eager execution. Arg 'handle' is a ref.")
3385 
def queue_dequeue_many(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  If the queue is closed and there are fewer than `n` elements, then an
  OutOfRange error is returned.

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor.  All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  This operation has `k` outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until `n` elements
  have been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.

  Raises:
    RuntimeError: If executing eagerly; the ref-typed handle input cannot
      be represented in eager mode (use `queue_dequeue_many_v2` instead).
  """
  _ctx = _context._context
  # Graph mode (or the eager context is not yet initialized): validate and
  # canonicalize the attrs, then build a QueueDequeueMany node in the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'queue_dequeue_many' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueDequeueMany", handle=handle, n=n,
        component_types=component_types, timeout_ms=timeout_ms, name=name)
    # One output per entry in component_types; return the full list
    # (no single-element unpacking, unlike single-output ops in this file).
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op.get_attr("timeout_ms"))
    _execute.record_gradient(
      "QueueDequeueMany", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    # Fixed: removed the unreachable duplicate `raise` that trailed this
    # if/else (both branches return or raise, so it was dead code).
    raise RuntimeError("queue_dequeue_many op does not support eager execution. Arg 'handle' is a ref.")
3443 
def queue_dequeue_many_v2(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  If the queue is closed and there are fewer than `n` elements, then an
  OutOfRange error is returned.

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor.  All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  This operation has `k` outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until `n` elements
  have been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build a QueueDequeueManyV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate and canonicalize attrs before constructing the op.
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'queue_dequeue_many_v2' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueDequeueManyV2", handle=handle, n=n,
        component_types=component_types, timeout_ms=timeout_ms, name=name)
    _result = _op.outputs[:]
    # No output tensors: return the Operation itself.
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op.get_attr("timeout_ms"))
    _execute.record_gradient(
      "QueueDequeueManyV2", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QueueDequeueManyV2", name, _ctx._post_execution_callbacks, handle, n,
        "component_types", component_types, "timeout_ms", timeout_ms)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return queue_dequeue_many_v2_eager_fallback(
          handle, n, component_types=component_types, timeout_ms=timeout_ms,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C++ status to a TF error; drop the raw exception context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3515 
3516 
def queue_dequeue_many_v2_eager_fallback(handle, n, component_types, timeout_ms=-1, name=None, ctx=None):
  r"""Eager-mode slow path for queue_dequeue_many_v2.

  Validates the attrs, converts the inputs to eager tensors, and executes
  the QueueDequeueManyV2 op directly.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_many_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types")
                     for _t in component_types]
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  n = _ops.convert_to_tensor(n, _dtypes.int32)
  flat_inputs = [handle, n]
  op_attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
  outputs = _execute.execute(b"QueueDequeueManyV2", len(component_types),
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QueueDequeueManyV2", flat_inputs, op_attrs, outputs, name)
  return outputs
3540 
3541 
def queue_dequeue_up_to(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  This operation is not supported by all queues.  If a queue does not support
  DequeueUpTo, then an Unimplemented error is returned.

  If the queue is closed and there are more than 0 but less than `n`
  elements remaining, then instead of returning an OutOfRange error like
  QueueDequeueMany, less than `n` elements are returned immediately.  If
  the queue is closed and there are 0 elements left in the queue, then
  an OutOfRange error is returned just like in QueueDequeueMany.
  Otherwise the behavior is identical to QueueDequeueMany:

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor.  All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  This operation has k outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.

  Raises:
    RuntimeError: If called while eager execution is enabled; this op takes
      a ref-typed handle and is only usable in graph mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate and canonicalize attrs before constructing the op.
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'queue_dequeue_up_to' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueDequeueUpTo", handle=handle, n=n,
        component_types=component_types, timeout_ms=timeout_ms, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op.get_attr("timeout_ms"))
    _execute.record_gradient(
      "QueueDequeueUpTo", _inputs_flat, _attrs, _result, name)
    return _result
  else:
    raise RuntimeError("queue_dequeue_up_to op does not support eager execution. Arg 'handle' is a ref.")
3603 
def queue_dequeue_up_to_v2(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  This operation is not supported by all queues.  If a queue does not support
  DequeueUpTo, then an Unimplemented error is returned.

  If the queue is closed and there are more than 0 but less than `n`
  elements remaining, then instead of returning an OutOfRange error like
  QueueDequeueMany, less than `n` elements are returned immediately.  If
  the queue is closed and there are 0 elements left in the queue, then
  an OutOfRange error is returned just like in QueueDequeueMany.
  Otherwise the behavior is identical to QueueDequeueMany:

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor.  All of the components
  in the dequeued tuple will have size n in the 0th dimension.

  This operation has `k` outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build a QueueDequeueUpToV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate and canonicalize attrs before constructing the op.
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'queue_dequeue_up_to_v2' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueDequeueUpToV2", handle=handle, n=n,
        component_types=component_types, timeout_ms=timeout_ms, name=name)
    _result = _op.outputs[:]
    # No output tensors: return the Operation itself.
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op.get_attr("timeout_ms"))
    _execute.record_gradient(
      "QueueDequeueUpToV2", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QueueDequeueUpToV2", name, _ctx._post_execution_callbacks, handle, n,
        "component_types", component_types, "timeout_ms", timeout_ms)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return queue_dequeue_up_to_v2_eager_fallback(
          handle, n, component_types=component_types, timeout_ms=timeout_ms,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C++ status to a TF error; drop the raw exception context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3679 
3680 
def queue_dequeue_up_to_v2_eager_fallback(handle, n, component_types, timeout_ms=-1, name=None, ctx=None):
  r"""Eager-mode slow path for queue_dequeue_up_to_v2.

  Validates the attrs, converts the inputs to eager tensors, and executes
  the QueueDequeueUpToV2 op directly.
  """
  exec_ctx = ctx or _context.context()
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_up_to_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types")
                     for _t in component_types]
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  n = _ops.convert_to_tensor(n, _dtypes.int32)
  flat_inputs = [handle, n]
  op_attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
  outputs = _execute.execute(b"QueueDequeueUpToV2", len(component_types),
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=exec_ctx, name=name)
  _execute.record_gradient(
      "QueueDequeueUpToV2", flat_inputs, op_attrs, outputs, name)
  return outputs
3704 
3705 
def queue_dequeue_v2(handle, component_types, timeout_ms=-1, name=None):
  r"""Dequeues a tuple of one or more tensors from the given queue.

  This operation has k outputs, where k is the number of components
  in the tuples stored in the given queue, and output i is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until an element
  has been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is empty, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build a QueueDequeueV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Validate and canonicalize attrs before constructing the op.
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'queue_dequeue_v2' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueDequeueV2", handle=handle, component_types=component_types,
        timeout_ms=timeout_ms, name=name)
    _result = _op.outputs[:]
    # No output tensors: return the Operation itself.
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op.get_attr("timeout_ms"))
    _execute.record_gradient(
      "QueueDequeueV2", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QueueDequeueV2", name, _ctx._post_execution_callbacks, handle,
        "component_types", component_types, "timeout_ms", timeout_ms)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return queue_dequeue_v2_eager_fallback(
          handle, component_types=component_types, timeout_ms=timeout_ms,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C++ status to a TF error; drop the raw exception context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3769 
3770 
def queue_dequeue_v2_eager_fallback(handle, component_types, timeout_ms=-1, name=None, ctx=None):
  r"""Eager-mode slow path for queue_dequeue_v2.

  Validates the attrs, converts the handle to an eager tensor, and executes
  the QueueDequeueV2 op directly.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types")
                     for _t in component_types]
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flat_inputs = [handle]
  op_attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
  outputs = _execute.execute(b"QueueDequeueV2", len(component_types),
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QueueDequeueV2", flat_inputs, op_attrs, outputs, name)
  return outputs
3793 
3794 
def queue_enqueue(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues a tuple of one or more tensors in the given queue.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  element has been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is full, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.

  Raises:
    RuntimeError: If called while eager execution is enabled; this op takes
      a ref-typed handle and is only usable in graph mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueEnqueue", handle=handle, components=components,
        timeout_ms=timeout_ms, name=name)
    # QueueEnqueue has no outputs; return the created Operation.
    return _op
  else:
    raise RuntimeError("queue_enqueue op does not support eager execution. Arg 'handle' is a ref.")
3834 
def queue_enqueue_many(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues zero or more tuples of one or more tensors in the given queue.

  This operation slices each component tensor along the 0th dimension to
  make multiple queue elements. All of the tuple components must have the
  same size in the 0th dimension.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  elements have been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should
      be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is too full, this operation will block for up
      to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.

  Raises:
    RuntimeError: If called while eager execution is enabled; this op takes
      a ref-typed handle and is only usable in graph mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueEnqueueMany", handle=handle, components=components,
        timeout_ms=timeout_ms, name=name)
    # QueueEnqueueMany has no outputs; return the created Operation.
    return _op
  else:
    raise RuntimeError("queue_enqueue_many op does not support eager execution. Arg 'handle' is a ref.")
3879 
def queue_enqueue_many_v2(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues zero or more tuples of one or more tensors in the given queue.

  This operation slices each component tensor along the 0th dimension to
  make multiple queue elements. All of the tuple components must have the
  same size in the 0th dimension.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  elements have been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should
      be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is too full, this operation will block for up
      to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build a QueueEnqueueManyV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueEnqueueManyV2", handle=handle, components=components,
        timeout_ms=timeout_ms, name=name)
    # QueueEnqueueManyV2 has no outputs; return the created Operation.
    return _op
  else:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QueueEnqueueManyV2", name, _ctx._post_execution_callbacks, handle,
        components, "timeout_ms", timeout_ms)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return queue_enqueue_many_v2_eager_fallback(
          handle, components, timeout_ms=timeout_ms, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C++ status to a TF error; drop the raw exception context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3935 
3936 
def queue_enqueue_many_v2_eager_fallback(handle, components, timeout_ms=-1, name=None, ctx=None):
  r"""Eager-mode slow path for queue_enqueue_many_v2.

  Converts the inputs to eager tensors and executes the QueueEnqueueManyV2
  op directly.  The op has no outputs, so this always returns None.
  """
  run_ctx = ctx or _context.context()
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  _attr_Tcomponents, components = _execute.convert_to_mixed_eager_tensors(
      components, run_ctx)
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flat_inputs = [handle] + list(components)
  op_attrs = ("Tcomponents", _attr_Tcomponents, "timeout_ms", timeout_ms)
  _execute.execute(b"QueueEnqueueManyV2", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=run_ctx, name=name)
  return None
3953 
3954 
def queue_enqueue_v2(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues a tuple of one or more tensors in the given queue.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  element has been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is full, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build a QueueEnqueueV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if timeout_ms is None:
      timeout_ms = -1
    timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueEnqueueV2", handle=handle, components=components,
        timeout_ms=timeout_ms, name=name)
    # QueueEnqueueV2 has no outputs; return the created Operation.
    return _op
  else:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QueueEnqueueV2", name, _ctx._post_execution_callbacks, handle,
        components, "timeout_ms", timeout_ms)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return queue_enqueue_v2_eager_fallback(
          handle, components, timeout_ms=timeout_ms, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C++ status to a TF error; drop the raw exception context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4005 
4006 
def queue_enqueue_v2_eager_fallback(handle, components, timeout_ms=-1, name=None, ctx=None):
  r"""Eager-mode slow path for queue_enqueue_v2.

  Converts the inputs to eager tensors and executes the QueueEnqueueV2
  op directly.  The op has no outputs, so this always returns None.
  """
  run_ctx = ctx or _context.context()
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  _attr_Tcomponents, components = _execute.convert_to_mixed_eager_tensors(
      components, run_ctx)
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flat_inputs = [handle] + list(components)
  op_attrs = ("Tcomponents", _attr_Tcomponents, "timeout_ms", timeout_ms)
  _execute.execute(b"QueueEnqueueV2", 0, inputs=flat_inputs, attrs=op_attrs,
                   ctx=run_ctx, name=name)
  return None
4023 
4024 
def queue_is_closed(handle, name=None):
  r"""Returns true if queue is closed.

  This operation returns true if the queue is closed and false if the queue
  is open.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.

  Raises:
    RuntimeError: If called while eager execution is enabled; this op takes
      a ref-typed handle and is only usable in graph mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueIsClosed", handle=handle, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "QueueIsClosed", _inputs_flat, _attrs, _result, name)
    # Unpack the single bool output tensor.
    _result, = _result
    return _result
  else:
    raise RuntimeError("queue_is_closed op does not support eager execution. Arg 'handle' is a ref.")
4055 
def queue_is_closed_v2(handle, name=None):
  r"""Returns true if queue is closed.

  This operation returns true if the queue is closed and false if the queue
  is open.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build a QueueIsClosedV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueIsClosedV2", handle=handle, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "QueueIsClosedV2", _inputs_flat, _attrs, _result, name)
    # Unpack the single bool output tensor.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QueueIsClosedV2", name, _ctx._post_execution_callbacks, handle)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return queue_is_closed_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C++ status to a TF error; drop the raw exception context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4096 
4097 
def queue_is_closed_v2_eager_fallback(handle, name=None, ctx=None):
  r"""Eager-mode slow path for queue_is_closed_v2.

  Converts the handle to an eager tensor and executes the QueueIsClosedV2
  op directly, returning its single bool output.
  """
  eager_ctx = ctx or _context.context()
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flat_inputs = [handle]
  outputs = _execute.execute(b"QueueIsClosedV2", 1, inputs=flat_inputs,
                             attrs=None, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QueueIsClosedV2", flat_inputs, None, outputs, name)
  result, = outputs
  return result
4112 
4113 
def queue_size(handle, name=None):
  r"""Computes the number of elements in the given queue.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.

  Raises:
    RuntimeError: If called while eager execution is enabled; this op takes
      a ref-typed handle and is only usable in graph mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueSize", handle=handle, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "QueueSize", _inputs_flat, _attrs, _result, name)
    # Unpack the single int32 output tensor.
    _result, = _result
    return _result
  else:
    raise RuntimeError("queue_size op does not support eager execution. Arg 'handle' is a ref.")
4141 
def queue_size_v2(handle, name=None):
  r"""Computes the number of elements in the given queue.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build a QueueSizeV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "QueueSizeV2", handle=handle, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "QueueSizeV2", _inputs_flat, _attrs, _result, name)
    # Unpack the single int32 output tensor.
    _result, = _result
    return _result

  else:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "QueueSizeV2",
        name, _ctx._post_execution_callbacks, handle)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return queue_size_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Convert the C++ status to a TF error; drop the raw exception context.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4179 
4180 
def queue_size_v2_eager_fallback(handle, name=None, ctx=None):
  r"""Eager-mode slow path for queue_size_v2.

  Converts the handle to an eager tensor and executes the QueueSizeV2 op
  directly, returning its single int32 output.
  """
  eager_ctx = ctx or _context.context()
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flat_inputs = [handle]
  outputs = _execute.execute(b"QueueSizeV2", 1, inputs=flat_inputs,
                             attrs=None, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QueueSizeV2", flat_inputs, None, outputs, name)
  result, = outputs
  return result
4195 
4196 
def random_shuffle_queue(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
  r"""A queue that randomizes the order of elements.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    min_after_dequeue: An optional `int`. Defaults to `0`.
      Dequeue will block unless there would be this
      many elements after the dequeue or the queue is closed. This
      ensures a minimum level of mixing of elements.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, a random seed is used.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate list attrs, substitute defaults for None, and
    # canonicalize every attr value before building the op.
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'random_shuffle_queue' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if shapes is None:
      shapes = []
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'random_shuffle_queue' Op, not %r." % shapes)
    shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
    if capacity is None:
      capacity = -1
    capacity = _execute.make_int(capacity, "capacity")
    if min_after_dequeue is None:
      min_after_dequeue = 0
    min_after_dequeue = _execute.make_int(min_after_dequeue, "min_after_dequeue")
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RandomShuffleQueue", component_types=component_types, shapes=shapes,
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        seed2=seed2, container=container, shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
              "min_after_dequeue", _op.get_attr("min_after_dequeue"), "seed",
              _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "RandomShuffleQueue", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element list.
    _result, = _result
    return _result

  else:
    # This op produces a ref-typed handle, which eager mode cannot represent.
    raise RuntimeError("random_shuffle_queue op does not support eager execution. Arg 'handle' is a ref.")
4285 
def random_shuffle_queue_v2(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
  r"""A queue that randomizes the order of elements.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    min_after_dequeue: An optional `int`. Defaults to `0`.
      Dequeue will block unless there would be this
      many elements after the dequeue or the queue is closed. This
      ensures a minimum level of mixing of elements.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, a random seed is used.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate list attrs, substitute defaults for None, and
    # canonicalize every attr value before building the op.
    if not isinstance(component_types, (list, tuple)):
      raise TypeError(
          "Expected list for 'component_types' argument to "
          "'random_shuffle_queue_v2' Op, not %r." % component_types)
    component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
    if shapes is None:
      shapes = []
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'random_shuffle_queue_v2' Op, not %r." % shapes)
    shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
    if capacity is None:
      capacity = -1
    capacity = _execute.make_int(capacity, "capacity")
    if min_after_dequeue is None:
      min_after_dequeue = 0
    min_after_dequeue = _execute.make_int(min_after_dequeue, "min_after_dequeue")
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RandomShuffleQueueV2", component_types=component_types,
        shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue,
        seed=seed, seed2=seed2, container=container, shared_name=shared_name,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
              "min_after_dequeue", _op.get_attr("min_after_dequeue"), "seed",
              _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "RandomShuffleQueueV2", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: attrs are passed as alternating name/value pairs;
      # the order is positional and must match the C-layer protocol.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "RandomShuffleQueueV2", name, _ctx._post_execution_callbacks,
        "component_types", component_types, "shapes", shapes, "capacity",
        capacity, "min_after_dequeue", min_after_dequeue, "seed", seed,
        "seed2", seed2, "container", container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      # Fast path declined these inputs; retry via the Python slow path.
      return random_shuffle_queue_v2_eager_fallback(
          component_types=component_types, shapes=shapes, capacity=capacity,
          min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the corresponding TF error, suppressing exception chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4390 
4391 
def random_shuffle_queue_v2_eager_fallback(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of random_shuffle_queue_v2.

  Invoked when the fast-path C execution raises a fallback exception;
  validates and canonicalizes all attrs, then runs the op through the
  generic Python execute path.
  """
  exec_ctx = ctx if ctx else _context.context()
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'random_shuffle_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(t, "component_types") for t in component_types]
  if shapes is None:
    shapes = []
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'random_shuffle_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(s, "shapes") for s in shapes]
  # Canonicalize the scalar attrs, substituting each default for None.
  capacity = _execute.make_int(-1 if capacity is None else capacity,
                               "capacity")
  min_after_dequeue = _execute.make_int(
      0 if min_after_dequeue is None else min_after_dequeue,
      "min_after_dequeue")
  seed = _execute.make_int(0 if seed is None else seed, "seed")
  seed2 = _execute.make_int(0 if seed2 is None else seed2, "seed2")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  flat_inputs = []
  attr_tuple = ("component_types", component_types, "shapes", shapes,
                "capacity", capacity, "min_after_dequeue", min_after_dequeue,
                "seed", seed, "seed2", seed2, "container", container,
                "shared_name", shared_name)
  outputs = _execute.execute(b"RandomShuffleQueueV2", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=exec_ctx, name=name)
  _execute.record_gradient(
      "RandomShuffleQueueV2", flat_inputs, attr_tuple, outputs, name)
  queue_handle, = outputs
  return queue_handle
4437 
4438 
def record_input(file_pattern, file_random_seed=301, file_shuffle_shift_ratio=0, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type="", name=None):
  r"""Emits randomized records.

  Args:
    file_pattern: A `string`. Glob pattern for the data files.
    file_random_seed: An optional `int`. Defaults to `301`.
      Random seeds used to produce randomized records.
    file_shuffle_shift_ratio: An optional `float`. Defaults to `0`.
      Shifts the list of files after the list is randomly
      shuffled.
    file_buffer_size: An optional `int`. Defaults to `10000`.
      The randomization shuffling buffer.
    file_parallelism: An optional `int`. Defaults to `16`.
      How many sstables are opened and concurrently iterated over.
    batch_size: An optional `int`. Defaults to `32`. The batch size.
    compression_type: An optional `string`. Defaults to `""`.
      The type of compression for the file. Currently ZLIB and
      GZIP are supported. Defaults to none.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: substitute defaults for None and canonicalize every attr
    # value before building the op.
    file_pattern = _execute.make_str(file_pattern, "file_pattern")
    if file_random_seed is None:
      file_random_seed = 301
    file_random_seed = _execute.make_int(file_random_seed, "file_random_seed")
    if file_shuffle_shift_ratio is None:
      file_shuffle_shift_ratio = 0
    file_shuffle_shift_ratio = _execute.make_float(file_shuffle_shift_ratio, "file_shuffle_shift_ratio")
    if file_buffer_size is None:
      file_buffer_size = 10000
    file_buffer_size = _execute.make_int(file_buffer_size, "file_buffer_size")
    if file_parallelism is None:
      file_parallelism = 16
    file_parallelism = _execute.make_int(file_parallelism, "file_parallelism")
    if batch_size is None:
      batch_size = 32
    batch_size = _execute.make_int(batch_size, "batch_size")
    if compression_type is None:
      compression_type = ""
    compression_type = _execute.make_str(compression_type, "compression_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RecordInput", file_pattern=file_pattern,
        file_random_seed=file_random_seed,
        file_shuffle_shift_ratio=file_shuffle_shift_ratio,
        file_buffer_size=file_buffer_size, file_parallelism=file_parallelism,
        batch_size=batch_size, compression_type=compression_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("file_pattern", _op.get_attr("file_pattern"),
              "file_random_seed", _op.get_attr("file_random_seed"),
              "file_shuffle_shift_ratio",
              _op.get_attr("file_shuffle_shift_ratio"), "file_buffer_size",
              _op.get_attr("file_buffer_size"), "file_parallelism",
              _op.get_attr("file_parallelism"), "batch_size",
              _op.get_attr("batch_size"), "compression_type",
              _op.get_attr("compression_type"))
    _execute.record_gradient(
      "RecordInput", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: attrs are passed as alternating name/value pairs;
      # the order is positional and must match the C-layer protocol.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RecordInput",
        name, _ctx._post_execution_callbacks, "file_pattern", file_pattern,
        "file_random_seed", file_random_seed, "file_shuffle_shift_ratio",
        file_shuffle_shift_ratio, "file_buffer_size", file_buffer_size,
        "file_parallelism", file_parallelism, "batch_size", batch_size,
        "compression_type", compression_type)
      return _result
    except _core._FallbackException:
      # Fast path declined these inputs; retry via the Python slow path.
      return record_input_eager_fallback(
          file_pattern=file_pattern, file_random_seed=file_random_seed,
          file_shuffle_shift_ratio=file_shuffle_shift_ratio,
          file_buffer_size=file_buffer_size,
          file_parallelism=file_parallelism, batch_size=batch_size,
          compression_type=compression_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the corresponding TF error, suppressing exception chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4527 
4528 
def record_input_eager_fallback(file_pattern, file_random_seed=301, file_shuffle_shift_ratio=0, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type="", name=None, ctx=None):
  r"""Slow-path eager implementation of record_input.

  Invoked when the fast-path C execution raises a fallback exception;
  canonicalizes all attrs, then runs the RecordInput op through the
  generic Python execute path.
  """
  exec_ctx = ctx if ctx else _context.context()
  file_pattern = _execute.make_str(file_pattern, "file_pattern")
  # Canonicalize the remaining attrs, substituting each default for None.
  file_random_seed = _execute.make_int(
      301 if file_random_seed is None else file_random_seed,
      "file_random_seed")
  file_shuffle_shift_ratio = _execute.make_float(
      0 if file_shuffle_shift_ratio is None else file_shuffle_shift_ratio,
      "file_shuffle_shift_ratio")
  file_buffer_size = _execute.make_int(
      10000 if file_buffer_size is None else file_buffer_size,
      "file_buffer_size")
  file_parallelism = _execute.make_int(
      16 if file_parallelism is None else file_parallelism,
      "file_parallelism")
  batch_size = _execute.make_int(32 if batch_size is None else batch_size,
                                 "batch_size")
  compression_type = _execute.make_str(
      "" if compression_type is None else compression_type,
      "compression_type")
  flat_inputs = []
  attr_tuple = ("file_pattern", file_pattern, "file_random_seed",
                file_random_seed, "file_shuffle_shift_ratio",
                file_shuffle_shift_ratio, "file_buffer_size",
                file_buffer_size, "file_parallelism", file_parallelism,
                "batch_size", batch_size, "compression_type",
                compression_type)
  outputs = _execute.execute(b"RecordInput", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=exec_ctx, name=name)
  _execute.record_gradient(
      "RecordInput", flat_inputs, attr_tuple, outputs, name)
  records, = outputs
  return records
4564 
4565 
def sparse_accumulator_apply_gradient(handle, local_step, gradient_indices, gradient_values, gradient_shape, has_known_shape, name=None):
  r"""Applies a sparse gradient to a given accumulator.

  Does not add if local_step is smaller than the accumulator's
  global_step.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a accumulator.
    local_step: A `Tensor` of type `int64`.
      The local_step value at which the sparse gradient was computed.
    gradient_indices: A `Tensor` of type `int64`.
      Indices of the sparse gradient to be accumulated. Must be a
      vector.
    gradient_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Values are the non-zero slices of the gradient, and must have
      the same first dimension as indices, i.e., the nnz represented by indices and
      values must be consistent.
    gradient_shape: A `Tensor` of type `int64`.
      Shape of the sparse gradient to be accumulated.
    has_known_shape: A `bool`.
      Boolean indicating whether gradient_shape is unknown, in which
      case the input is ignored during validation.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    has_known_shape = _execute.make_bool(has_known_shape, "has_known_shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseAccumulatorApplyGradient", handle=handle,
        local_step=local_step, gradient_indices=gradient_indices,
        gradient_values=gradient_values, gradient_shape=gradient_shape,
        has_known_shape=has_known_shape, name=name)
    # This op has no outputs; return the created Operation itself.
    return _op

  else:
    # 'handle' is a ref-typed tensor, which eager mode cannot represent.
    raise RuntimeError("sparse_accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
4610 
# Named output tuple for sparse_accumulator_take_gradient: the op returns an
# (indices, values, shape) triple describing the extracted sparse gradient.
_sparse_accumulator_take_gradient_outputs = ["indices", "values", "shape"]
_SparseAccumulatorTakeGradientOutput = _collections.namedtuple(
    "SparseAccumulatorTakeGradient",
    _sparse_accumulator_take_gradient_outputs)
4615 
4616 
def sparse_accumulator_take_gradient(handle, num_required, dtype, name=None):
  r"""Extracts the average sparse gradient in a SparseConditionalAccumulator.

  The op will blocks until sufficient (i.e., more than num_required)
  gradients have been accumulated. If the accumulator has already
  aggregated more than num_required gradients, it will return its
  average of the accumulated gradients.  Also automatically increments
  the recorded global_step in the accumulator by 1, and resets the
  aggregate to 0.

  Args:
    handle: A `Tensor` of type mutable `string`.
      The handle to a SparseConditionalAccumulator.
    num_required: A `Tensor` of type `int32`.
      Number of gradients required before we return an aggregate.
    dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
      The data type of accumulated gradients. Needs to correspond to the type
      of the accumulator.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (indices, values, shape).

    indices: A `Tensor` of type `int64`.
    values: A `Tensor` of type `dtype`.
    shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseAccumulatorTakeGradient", handle=handle,
        num_required=num_required, dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
      "SparseAccumulatorTakeGradient", _inputs_flat, _attrs, _result, name)
    # Package the three outputs as a named (indices, values, shape) tuple.
    _result = _SparseAccumulatorTakeGradientOutput._make(_result)
    return _result

  else:
    # 'handle' is a ref-typed tensor, which eager mode cannot represent.
    raise RuntimeError("sparse_accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
4663 
def sparse_conditional_accumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
  r"""A conditional accumulator for aggregating sparse gradients.

  The accumulator accepts gradients marked with local_step greater or
  equal to the most recent global_step known to the accumulator. The
  average can be extracted from the accumulator, provided sufficient
  gradients have been accumulated. Extracting the average automatically
  resets the aggregate to 0, and increments the global_step recorded by
  the accumulator.

  Args:
    dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
      The type of the value being accumulated.
    shape: A `tf.TensorShape` or list of `ints`. The shape of the values.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this accumulator is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this accumulator will be shared under the given name
      across multiple sessions.
    reduction_type: An optional `string` from: `"MEAN", "SUM"`. Defaults to `"MEAN"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: substitute defaults for None and canonicalize attrs.
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    if reduction_type is None:
      reduction_type = "MEAN"
    reduction_type = _execute.make_str(reduction_type, "reduction_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseConditionalAccumulator", dtype=dtype, shape=shape,
        container=container, shared_name=shared_name,
        reduction_type=reduction_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "reduction_type",
              _op.get_attr("reduction_type"))
    _execute.record_gradient(
      "SparseConditionalAccumulator", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element list.
    _result, = _result
    return _result

  else:
    # This op produces a ref-typed handle, which eager mode cannot represent.
    raise RuntimeError("sparse_conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
4723 
def _stack(elem_type, stack_name="", name=None):
  r"""Deprecated, use StackV2.

  Args:
    elem_type: A `tf.DType`.
    stack_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    elem_type = _execute.make_type(elem_type, "elem_type")
    if stack_name is None:
      stack_name = ""
    stack_name = _execute.make_str(stack_name, "stack_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Stack", elem_type=elem_type, stack_name=stack_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("elem_type", _op.get_attr("elem_type"), "stack_name",
              _op.get_attr("stack_name"))
    _execute.record_gradient(
      "Stack", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element list.
    _result, = _result
    return _result

  else:
    # This op produces a ref-typed handle, which eager mode cannot represent.
    raise RuntimeError("stack op does not support eager execution. Arg 'handle' is a ref.")
4757 
def stack_close(handle, name=None):
  r"""Deprecated, use StackCloseV2.

  Args:
    handle: A `Tensor` of type mutable `string`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "StackClose", handle=handle, name=name)
    # This op has no outputs; return the created Operation itself.
    return _op

  else:
    # 'handle' is a ref-typed tensor, which eager mode cannot represent.
    raise RuntimeError("stack_close op does not support eager execution. Arg 'handle' is a ref.")
4781 
def stack_close_v2(handle, name=None):
  r"""Delete the stack from its resource container.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a stack.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "StackCloseV2", handle=handle, name=name)
    # This op has no outputs; return the created Operation itself.
    return _op

  else:
    try:
      # Eager fast path: dispatch straight to the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "StackCloseV2",
        name, _ctx._post_execution_callbacks, handle)
      return _result
    except _core._FallbackException:
      # Fast path declined these inputs; retry via the Python slow path.
      return stack_close_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Re-raise as the corresponding TF error, suppressing exception chaining.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4815 
4816 
def stack_close_v2_eager_fallback(handle, name=None, ctx=None):
  r"""Slow-path eager implementation of stack_close_v2.

  Invoked when the fast-path C execution raises a fallback exception;
  runs the StackCloseV2 op (which has no outputs) through the generic
  Python execute path.
  """
  exec_ctx = ctx if ctx else _context.context()
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  _execute.execute(b"StackCloseV2", 0, inputs=[handle], attrs=None,
                   ctx=exec_ctx, name=name)
  return None
4829 
4830 
def stack_pop(handle, elem_type, name=None):
  r"""Deprecated, use StackPopV2.

  Args:
    handle: A `Tensor` of type mutable `string`.
    elem_type: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `elem_type`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    elem_type = _execute.make_type(elem_type, "elem_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "StackPop", handle=handle, elem_type=elem_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("elem_type", _op.get_attr("elem_type"))
    _execute.record_gradient(
      "StackPop", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element list.
    _result, = _result
    return _result

  else:
    # 'handle' is a ref-typed tensor, which eager mode cannot represent.
    raise RuntimeError("stack_pop op does not support eager execution. Arg 'handle' is a ref.")
4860 
def stack_pop_v2(handle, elem_type, name=None):
  r"""Pop the element at the top of the stack.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a stack.
    elem_type: A `tf.DType`. The type of the elem that is popped.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `elem_type`.
  """
  ctx = _context._context
  running_eagerly = ctx is not None and ctx._eager_context.is_eager
  if not running_eagerly:
    # Graph mode: construct the op and register it for gradient recording.
    elem_type = _execute.make_type(elem_type, "elem_type")
    _, _, op = _op_def_lib._apply_op_helper(
        "StackPopV2", handle=handle, elem_type=elem_type, name=name)
    outputs = op.outputs[:]
    attrs = ("elem_type", op.get_attr("elem_type"))
    _execute.record_gradient("StackPopV2", op.inputs, attrs, outputs, name)
    popped, = outputs
    return popped
  try:
    # Eager fast path: dispatch straight through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "StackPopV2",
        name, ctx._post_execution_callbacks, handle, "elem_type", elem_type)
  except _core._FallbackException:
    # Fast path rejected the inputs; use the Python slow path.
    return stack_pop_v2_eager_fallback(
        handle, elem_type=elem_type, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
4900 
4901 
def stack_pop_v2_eager_fallback(handle, elem_type, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stack_pop_v2
  """
  eager_ctx = ctx or _context.context()
  elem_type = _execute.make_type(elem_type, "elem_type")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  attrs = ("elem_type", elem_type)
  results = _execute.execute(b"StackPopV2", 1, inputs=inputs, attrs=attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("StackPopV2", inputs, attrs, results, name)
  popped, = results
  return popped
4917 
4918 
def stack_push(handle, elem, swap_memory=False, name=None):
  r"""Deprecated, use StackPushV2.

  Args:
    handle: A `Tensor` of type mutable `string`.
    elem: A `Tensor`.
    swap_memory: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `elem`.

  Raises:
    RuntimeError: If executed eagerly; 'handle' is a ref tensor and refs are
      not supported in eager mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op and record it for gradient computation.
    if swap_memory is None:
      swap_memory = False
    swap_memory = _execute.make_bool(swap_memory, "swap_memory")
    _, _, _op = _op_def_lib._apply_op_helper(
        "StackPush", handle=handle, elem=elem, swap_memory=swap_memory,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "swap_memory",
              _op.get_attr("swap_memory"))
    _execute.record_gradient(
      "StackPush", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    raise RuntimeError("stack_push op does not support eager execution. Arg 'handle' is a ref.")
4953 
def stack_push_v2(handle, elem, swap_memory=False, name=None):
  r"""Push an element onto the stack.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a stack.
    elem: A `Tensor`. The tensor to be pushed onto the stack.
    swap_memory: An optional `bool`. Defaults to `False`.
      Swap `elem` to CPU. Default to false.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `elem`.
  """
  ctx = _context._context
  running_eagerly = ctx is not None and ctx._eager_context.is_eager
  if not running_eagerly:
    # Graph mode: construct the op and register it for gradient recording.
    swap_memory = _execute.make_bool(
        swap_memory if swap_memory is not None else False, "swap_memory")
    _, _, op = _op_def_lib._apply_op_helper(
        "StackPushV2", handle=handle, elem=elem, swap_memory=swap_memory,
        name=name)
    outputs = op.outputs[:]
    attrs = ("T", op.get_attr("T"), "swap_memory",
             op.get_attr("swap_memory"))
    _execute.record_gradient("StackPushV2", op.inputs, attrs, outputs, name)
    pushed, = outputs
    return pushed
  try:
    # Eager fast path: dispatch straight through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "StackPushV2",
        name, ctx._post_execution_callbacks, handle, elem, "swap_memory",
        swap_memory)
  except _core._FallbackException:
    # Fast path rejected the inputs; use the Python slow path.
    return stack_push_v2_eager_fallback(
        handle, elem, swap_memory=swap_memory, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
5000 
5001 
def stack_push_v2_eager_fallback(handle, elem, swap_memory=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stack_push_v2
  """
  eager_ctx = ctx or _context.context()
  swap_memory = _execute.make_bool(
      swap_memory if swap_memory is not None else False, "swap_memory")
  # Infer the element dtype (attr "T") from the value being pushed.
  attr_t, (elem,) = _execute.args_to_matching_eager([elem], eager_ctx)
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource), elem]
  attrs = ("T", attr_t, "swap_memory", swap_memory)
  results = _execute.execute(b"StackPushV2", 1, inputs=inputs, attrs=attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("StackPushV2", inputs, attrs, results, name)
  pushed, = results
  return pushed
5020 
5021 
def stack_v2(max_size, elem_type, stack_name="", name=None):
  r"""A stack that produces elements in first-in last-out order.

  Args:
    max_size: A `Tensor` of type `int32`.
      The maximum size of the stack if non-negative. If negative, the stack
      size is unlimited.
    elem_type: A `tf.DType`. The type of the elements on the stack.
    stack_name: An optional `string`. Defaults to `""`.
      Overrides the name used for the temporary stack resource. Default
      value is the name of the 'Stack' op (which is guaranteed unique).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  ctx = _context._context
  running_eagerly = ctx is not None and ctx._eager_context.is_eager
  if not running_eagerly:
    # Graph mode: construct the op and register it for gradient recording.
    elem_type = _execute.make_type(elem_type, "elem_type")
    stack_name = _execute.make_str(
        stack_name if stack_name is not None else "", "stack_name")
    _, _, op = _op_def_lib._apply_op_helper(
        "StackV2", max_size=max_size, elem_type=elem_type,
        stack_name=stack_name, name=name)
    outputs = op.outputs[:]
    attrs = ("elem_type", op.get_attr("elem_type"), "stack_name",
             op.get_attr("stack_name"))
    _execute.record_gradient("StackV2", op.inputs, attrs, outputs, name)
    stack_handle, = outputs
    return stack_handle
  try:
    # Eager fast path: dispatch straight through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "StackV2",
        name, ctx._post_execution_callbacks, max_size, "elem_type",
        elem_type, "stack_name", stack_name)
  except _core._FallbackException:
    # Fast path rejected the inputs; use the Python slow path.
    return stack_v2_eager_fallback(
        max_size, elem_type=elem_type, stack_name=stack_name, name=name,
        ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
5073 
5074 
def stack_v2_eager_fallback(max_size, elem_type, stack_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stack_v2
  """
  eager_ctx = ctx or _context.context()
  elem_type = _execute.make_type(elem_type, "elem_type")
  stack_name = _execute.make_str(
      stack_name if stack_name is not None else "", "stack_name")
  inputs = [_ops.convert_to_tensor(max_size, _dtypes.int32)]
  attrs = ("elem_type", elem_type, "stack_name", stack_name)
  results = _execute.execute(b"StackV2", 1, inputs=inputs, attrs=attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("StackV2", inputs, attrs, results, name)
  stack_handle, = results
  return stack_handle
5093 
5094 
def stage(values, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Stage values similar to a lightweight Enqueue.

  The basic functionality of this Op is similar to a queue with many
  fewer capabilities and options.  This Op is optimized for performance.

  Args:
    values: A list of `Tensor` objects. a list of tensors
      dtypes A list of data types that inserted values should adhere to.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
      Maximum number of elements in the Staging Area. If > 0, inserts
      on the container will block when the capacity is reached.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
      The maximum number of bytes allowed for Tensors in the Staging Area.
      If > 0, inserts will block until sufficient space is available.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container. Otherwise,
      a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      It is necessary to match this name to the matching Unstage Op.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the optional attrs, then build the op.
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Stage", values=values, capacity=capacity, memory_limit=memory_limit,
        container=container, shared_name=shared_name, name=name)
    # Stage has no outputs; return the Operation itself.  (The generated
    # code previously had unreachable statements after this return.)
    return _op

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Stage", name,
        _ctx._post_execution_callbacks, values, "capacity", capacity,
        "memory_limit", memory_limit, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._FallbackException:
      return stage_eager_fallback(
          values, capacity=capacity, memory_limit=memory_limit,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5159 
5160 
def stage_eager_fallback(values, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stage

  Args:
    values: A list of `Tensor` objects to insert into the staging area.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).
    ctx: An eager context to execute in; defaults to the current context.

  Returns:
    None. Stage produces no outputs.
  """
  _ctx = ctx if ctx else _context.context()
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # The "dtypes" attr is inferred from the (possibly mixed-type) values.
  _attr_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  _inputs_flat = list(values)
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  _attr_dtypes, "container", container, "shared_name", shared_name)
  # Stage has zero outputs; run it for its side effect only.  The original
  # generated code stored the result and then overwrote it with None.
  _execute.execute(b"Stage", 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                   name=name)
  return None
5186 
5187 
def stage_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes all elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.

  Raises:
    TypeError: If `dtypes` is not a list or tuple.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate/normalize the attrs, then build the op.
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'stage_clear' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "StageClear", dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    # StageClear has no outputs; return the Operation itself.  (The
    # generated code previously had unreachable statements after this
    # return.)
    return _op

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "StageClear",
        name, _ctx._post_execution_callbacks, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return stage_clear_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5247 
5248 
def stage_clear_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stage_clear

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).
    ctx: An eager context to execute in; defaults to the current context.

  Returns:
    None. StageClear produces no outputs.

  Raises:
    TypeError: If `dtypes` is not a list or tuple.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_clear' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _inputs_flat = []
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  dtypes, "container", container, "shared_name", shared_name)
  # StageClear has zero outputs; run it for its side effect only.  The
  # original generated code stored the result and then overwrote it with
  # None, which was a dead store.
  _execute.execute(b"StageClear", 0, inputs=_inputs_flat, attrs=_attrs,
                   ctx=_ctx, name=name)
  return None
5278 
5279 
def stage_peek(index, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op peeks at the values at the specified index.  If the

  underlying container does not contain sufficient elements
  this op will block until it does.   This Op is optimized for
  performance.

  Args:
    index: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  ctx = _context._context
  running_eagerly = ctx is not None and ctx._eager_context.is_eager
  if not running_eagerly:
    # Graph mode: validate/normalize attrs, then construct the op.
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'stage_peek' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    capacity = _execute.make_int(capacity if capacity is not None else 0,
                                 "capacity")
    memory_limit = _execute.make_int(
        memory_limit if memory_limit is not None else 0, "memory_limit")
    container = _execute.make_str(container if container is not None else "",
                                  "container")
    shared_name = _execute.make_str(
        shared_name if shared_name is not None else "", "shared_name")
    _, _, op = _op_def_lib._apply_op_helper(
        "StagePeek", index=index, dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    outputs = op.outputs[:]
    if not outputs:
      return op
    attrs = ("capacity", op.get_attr("capacity"), "memory_limit",
             op.get_attr("memory_limit"), "dtypes", op.get_attr("dtypes"),
             "container", op.get_attr("container"), "shared_name",
             op.get_attr("shared_name"))
    _execute.record_gradient("StagePeek", op.inputs, attrs, outputs, name)
    return outputs
  try:
    # Eager fast path: dispatch straight through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "StagePeek",
        name, ctx._post_execution_callbacks, index, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
  except _core._FallbackException:
    # Fast path rejected the inputs; use the Python slow path.
    return stage_peek_eager_fallback(
        index, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
        container=container, shared_name=shared_name, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
5352 
5353 
def stage_peek_eager_fallback(index, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stage_peek
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  capacity = _execute.make_int(capacity if capacity is not None else 0,
                               "capacity")
  memory_limit = _execute.make_int(
      memory_limit if memory_limit is not None else 0, "memory_limit")
  container = _execute.make_str(container if container is not None else "",
                                "container")
  shared_name = _execute.make_str(
      shared_name if shared_name is not None else "", "shared_name")
  inputs = [_ops.convert_to_tensor(index, _dtypes.int32)]
  attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
           dtypes, "container", container, "shared_name", shared_name)
  # One output tensor per requested dtype.
  results = _execute.execute(b"StagePeek", len(dtypes), inputs=inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("StagePeek", inputs, attrs, results, name)
  return results
5385 
5386 
def stage_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  ctx = _context._context
  running_eagerly = ctx is not None and ctx._eager_context.is_eager
  if not running_eagerly:
    # Graph mode: validate/normalize attrs, then construct the op.
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'stage_size' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    capacity = _execute.make_int(capacity if capacity is not None else 0,
                                 "capacity")
    memory_limit = _execute.make_int(
        memory_limit if memory_limit is not None else 0, "memory_limit")
    container = _execute.make_str(container if container is not None else "",
                                  "container")
    shared_name = _execute.make_str(
        shared_name if shared_name is not None else "", "shared_name")
    _, _, op = _op_def_lib._apply_op_helper(
        "StageSize", dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    outputs = op.outputs[:]
    attrs = ("capacity", op.get_attr("capacity"), "memory_limit",
             op.get_attr("memory_limit"), "dtypes", op.get_attr("dtypes"),
             "container", op.get_attr("container"), "shared_name",
             op.get_attr("shared_name"))
    _execute.record_gradient("StageSize", op.inputs, attrs, outputs, name)
    size, = outputs
    return size
  try:
    # Eager fast path: dispatch straight through the C API.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx._eager_context.device_name, "StageSize",
        name, ctx._post_execution_callbacks, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
  except _core._FallbackException:
    # Fast path rejected the inputs; use the Python slow path.
    return stage_size_eager_fallback(
        capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
        container=container, shared_name=shared_name, name=name, ctx=ctx)
  except _core._NotOkStatusException as e:
    message = e.message if name is None else e.message + " name: " + name
    _six.raise_from(_core._status_to_exception(e.code, message), None)
5453 
5454 
def stage_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stage_size
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  capacity = _execute.make_int(capacity if capacity is not None else 0,
                               "capacity")
  memory_limit = _execute.make_int(
      memory_limit if memory_limit is not None else 0, "memory_limit")
  container = _execute.make_str(container if container is not None else "",
                                "container")
  shared_name = _execute.make_str(
      shared_name if shared_name is not None else "", "shared_name")
  inputs = []
  attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
           dtypes, "container", container, "shared_name", shared_name)
  results = _execute.execute(b"StageSize", 1, inputs=inputs, attrs=attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("StageSize", inputs, attrs, results, name)
  size, = results
  return size
5486 
5487 
def tensor_array(size, dtype, dynamic_size=False, clear_after_read=True, tensor_array_name="", element_shape=None, name=None):
  r"""Deprecated TensorArray op; builds a graph-mode TensorArray handle.

  Args:
    size: A `Tensor` of type `int32`.
    dtype: A `tf.DType`.
    dynamic_size: An optional `bool`. Defaults to `False`.
    clear_after_read: An optional `bool`. Defaults to `True`.
    tensor_array_name: An optional `string`. Defaults to `""`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.

  Raises:
    RuntimeError: If executed eagerly; 'handle' is a ref tensor and refs are
      not supported in eager mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, build the op, record for gradients.
    dtype = _execute.make_type(dtype, "dtype")
    if dynamic_size is None:
      dynamic_size = False
    dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
    if clear_after_read is None:
      clear_after_read = True
    clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
    if tensor_array_name is None:
      tensor_array_name = ""
    tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
    # The generated default guard here ("if element_shape is None:
    # element_shape = None") was a no-op and has been removed; a None
    # element_shape flows into make_shape unchanged, exactly as before.
    element_shape = _execute.make_shape(element_shape, "element_shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArray", size=size, dtype=dtype, dynamic_size=dynamic_size,
        clear_after_read=clear_after_read,
        tensor_array_name=tensor_array_name, element_shape=element_shape,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "dynamic_size",
              _op.get_attr("dynamic_size"), "clear_after_read",
              _op.get_attr("clear_after_read"), "tensor_array_name",
              _op.get_attr("tensor_array_name"), "element_shape",
              _op.get_attr("element_shape"))
    _execute.record_gradient(
      "TensorArray", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    raise RuntimeError("tensor_array op does not support eager execution. Arg 'handle' is a ref.")
5540 
def tensor_array_close(handle, name=None):
  r"""Deprecated TensorArrayClose op; builds the op in graph mode.

  Args:
    handle: A `Tensor` of type mutable `string`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.

  Raises:
    RuntimeError: If executed eagerly; 'handle' is a ref tensor and refs are
      not supported in eager mode.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayClose", handle=handle, name=name)
    # TensorArrayClose has no outputs; return the Operation itself.  (The
    # generated code previously had unreachable statements after this
    # return, plus a duplicate unreachable raise.)
    return _op

  else:
    raise RuntimeError("tensor_array_close op does not support eager execution. Arg 'handle' is a ref.")
5564 
def tensor_array_close_v2(handle, name=None):
  r"""Deprecated. Use TensorArrayCloseV3

  Args:
    handle: A `Tensor` of type `string`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayCloseV2", handle=handle, name=name)
    # TensorArrayCloseV2 has no outputs; return the Operation itself.  (The
    # generated code previously had unreachable statements after this
    # return.)
    return _op

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayCloseV2", name, _ctx._post_execution_callbacks, handle)
      return _result
    except _core._FallbackException:
      return tensor_array_close_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5598 
5599 
def tensor_array_close_v2_eager_fallback(handle, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_close_v2.

  Converts the input to an eager tensor and runs the op through the
  generic execute path.  The op has no outputs, so None is returned.
  """
  eager_ctx = ctx or _context.context()
  inputs = [_ops.convert_to_tensor(handle, _dtypes.string)]
  _execute.execute(b"TensorArrayCloseV2", 0, inputs=inputs, attrs=None,
                   ctx=eager_ctx, name=name)
  return None
5612 
5613 
def tensor_array_close_v3(handle, name=None):
  r"""Delete the TensorArray from its resource container.

  This enables the user to close and release the resource in the middle
  of a step/run.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: the op has no outputs, so the Operation itself is
    # returned.  (The unreachable `_result = None; return _result` lines
    # that followed `return _op` have been removed.)
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayCloseV3", handle=handle, name=name)
    return _op
  else:
    # Eager mode: try the C fast path first, falling back to the Python
    # slow path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayCloseV3", name, _ctx._post_execution_callbacks, handle)
      return _result
    except _core._FallbackException:
      return tensor_array_close_v3_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5651 
5652 
def tensor_array_close_v3_eager_fallback(handle, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_close_v3.

  Converts the input to an eager tensor and runs the op through the
  generic execute path.  The op has no outputs, so None is returned.
  """
  eager_ctx = ctx or _context.context()
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  _execute.execute(b"TensorArrayCloseV3", 0, inputs=inputs, attrs=None,
                   ctx=eager_ctx, name=name)
  return None
5665 
5666 
# Output names and result-tuple type for the TensorArrayConcat op.
_tensor_array_concat_outputs = ["value", "lengths"]
_TensorArrayConcatOutput = _collections.namedtuple(
    "TensorArrayConcat", _tensor_array_concat_outputs)
5670 
5671 
def tensor_array_concat(handle, flow_in, dtype, element_shape_except0=None, name=None):
  r"""Concat the elements of a (ref-typed) TensorArray (graph mode only).

  Args:
    handle: A `Tensor` of type mutable `string`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (value, lengths).

    value: A `Tensor` of type `dtype`.
    lengths: A `Tensor` of type `int64`.

  Raises:
    RuntimeError: If called while eager execution is enabled; the ref-typed
      `handle` has no eager representation.
  """
  _ctx = _context._context
  # Guard clause: ref-typed inputs cannot be represented as eager tensors.
  if _ctx is not None and _ctx._eager_context.is_eager:
    raise RuntimeError("tensor_array_concat op does not support eager execution. Arg 'handle' is a ref.")
  dtype = _execute.make_type(dtype, "dtype")
  # make_shape accepts None directly (unknown shape); the generated no-op
  # `if element_shape_except0 is None:` branch has been removed.
  element_shape_except0 = _execute.make_shape(element_shape_except0,
                                              "element_shape_except0")
  _, _, _op = _op_def_lib._apply_op_helper(
      "TensorArrayConcat", handle=handle, flow_in=flow_in, dtype=dtype,
      element_shape_except0=element_shape_except0, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("dtype", _op.get_attr("dtype"), "element_shape_except0",
            _op.get_attr("element_shape_except0"))
  _execute.record_gradient(
      "TensorArrayConcat", _inputs_flat, _attrs, _result, name)
  return _TensorArrayConcatOutput._make(_result)
5711 
# Output names and result-tuple type for the TensorArrayConcatV2 op.
_tensor_array_concat_v2_outputs = ["value", "lengths"]
_TensorArrayConcatV2Output = _collections.namedtuple(
    "TensorArrayConcatV2", _tensor_array_concat_v2_outputs)
5715 
5716 
def tensor_array_concat_v2(handle, flow_in, dtype, element_shape_except0=None, name=None):
  r"""Deprecated. Use TensorArrayConcatV3

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (value, lengths).

    value: A `Tensor` of type `dtype`.
    lengths: A `Tensor` of type `int64`.
  """
  _ctx = _context._context
  # Graph mode: build a TensorArrayConcatV2 node and record its gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # NOTE(review): no-op branch emitted by the code generator; the
    # parameter already defaults to None.
    if element_shape_except0 is None:
      element_shape_except0 = None
    element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayConcatV2", handle=handle, flow_in=flow_in, dtype=dtype,
        element_shape_except0=element_shape_except0, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "element_shape_except0",
              _op.get_attr("element_shape_except0"))
    _execute.record_gradient(
      "TensorArrayConcatV2", _inputs_flat, _attrs, _result, name)
    _result = _TensorArrayConcatV2Output._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first, falling back to the Python
    # slow path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayConcatV2", name, _ctx._post_execution_callbacks, handle,
        flow_in, "dtype", dtype, "element_shape_except0",
        element_shape_except0)
      _result = _TensorArrayConcatV2Output._make(_result)
      return _result
    except _core._FallbackException:
      return tensor_array_concat_v2_eager_fallback(
          handle, flow_in, dtype=dtype,
          element_shape_except0=element_shape_except0, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5770 
5771 
def tensor_array_concat_v2_eager_fallback(handle, flow_in, dtype, element_shape_except0=None, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_concat_v2.

  Normalizes the attrs, converts the inputs to eager tensors, runs the op
  through the generic execute path and records the gradient.
  """
  eager_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  element_shape_except0 = _execute.make_shape(element_shape_except0,
                                              "element_shape_except0")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.string),
            _ops.convert_to_tensor(flow_in, _dtypes.float32)]
  attrs = ("dtype", dtype, "element_shape_except0", element_shape_except0)
  outputs = _execute.execute(b"TensorArrayConcatV2", 2, inputs=inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayConcatV2", inputs, attrs, outputs, name)
  return _TensorArrayConcatV2Output._make(outputs)
5791 
5792 
# Output names and result-tuple type for the TensorArrayConcatV3 op.
_tensor_array_concat_v3_outputs = ["value", "lengths"]
_TensorArrayConcatV3Output = _collections.namedtuple(
    "TensorArrayConcatV3", _tensor_array_concat_v3_outputs)
5796 
5797 
def tensor_array_concat_v3(handle, flow_in, dtype, element_shape_except0=None, name=None):
  r"""Concat the elements from the TensorArray into value `value`.

  Takes `T` elements of shapes

    ```
    (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
    ```

  and concatenates them into a Tensor of shape:

    ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```

  All elements must have the same shape (excepting the first dimension).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    dtype: A `tf.DType`. The type of the elem that is returned.
    element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      The expected shape of an element, if known,
      excluding the first dimension. Used to validate the shapes of
      TensorArray elements. If this shape is not fully specified, concatenating
      zero-size TensorArrays is an error.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (value, lengths).

    value: A `Tensor` of type `dtype`.
    lengths: A `Tensor` of type `int64`.
  """
  _ctx = _context._context
  # Graph mode: build a TensorArrayConcatV3 node and record its gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # NOTE(review): no-op branch emitted by the code generator; the
    # parameter already defaults to None.
    if element_shape_except0 is None:
      element_shape_except0 = None
    element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayConcatV3", handle=handle, flow_in=flow_in, dtype=dtype,
        element_shape_except0=element_shape_except0, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "element_shape_except0",
              _op.get_attr("element_shape_except0"))
    _execute.record_gradient(
      "TensorArrayConcatV3", _inputs_flat, _attrs, _result, name)
    _result = _TensorArrayConcatV3Output._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first, falling back to the Python
    # slow path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayConcatV3", name, _ctx._post_execution_callbacks, handle,
        flow_in, "dtype", dtype, "element_shape_except0",
        element_shape_except0)
      _result = _TensorArrayConcatV3Output._make(_result)
      return _result
    except _core._FallbackException:
      return tensor_array_concat_v3_eager_fallback(
          handle, flow_in, dtype=dtype,
          element_shape_except0=element_shape_except0, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5868 
5869 
def tensor_array_concat_v3_eager_fallback(handle, flow_in, dtype, element_shape_except0=None, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_concat_v3.

  Normalizes the attrs, converts the inputs to eager tensors, runs the op
  through the generic execute path and records the gradient.
  """
  eager_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  element_shape_except0 = _execute.make_shape(element_shape_except0,
                                              "element_shape_except0")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource),
            _ops.convert_to_tensor(flow_in, _dtypes.float32)]
  attrs = ("dtype", dtype, "element_shape_except0", element_shape_except0)
  outputs = _execute.execute(b"TensorArrayConcatV3", 2, inputs=inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayConcatV3", inputs, attrs, outputs, name)
  return _TensorArrayConcatV3Output._make(outputs)
5889 
5890 
def tensor_array_gather(handle, indices, flow_in, dtype, element_shape=None, name=None):
  r"""Gather specific elements from a (ref-typed) TensorArray (graph mode only).

  Args:
    handle: A `Tensor` of type mutable `string`.
    indices: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If called while eager execution is enabled; the ref-typed
      `handle` has no eager representation.
  """
  _ctx = _context._context
  # Guard clause: ref-typed inputs cannot be represented as eager tensors.
  if _ctx is not None and _ctx._eager_context.is_eager:
    raise RuntimeError("tensor_array_gather op does not support eager execution. Arg 'handle' is a ref.")
  dtype = _execute.make_type(dtype, "dtype")
  # make_shape accepts None directly (unknown shape); the generated no-op
  # `if element_shape is None:` branch has been removed.
  element_shape = _execute.make_shape(element_shape, "element_shape")
  _, _, _op = _op_def_lib._apply_op_helper(
      "TensorArrayGather", handle=handle, indices=indices, flow_in=flow_in,
      dtype=dtype, element_shape=element_shape, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
            _op.get_attr("element_shape"))
  _execute.record_gradient(
      "TensorArrayGather", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
5928 
def tensor_array_gather_v2(handle, indices, flow_in, dtype, element_shape=None, name=None):
  r"""Deprecated. Use TensorArrayGatherV3

  Args:
    handle: A `Tensor` of type `string`.
    indices: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  # Graph mode: build a TensorArrayGatherV2 node and record its gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # NOTE(review): no-op branch emitted by the code generator; the
    # parameter already defaults to None.
    if element_shape is None:
      element_shape = None
    element_shape = _execute.make_shape(element_shape, "element_shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayGatherV2", handle=handle, indices=indices,
        flow_in=flow_in, dtype=dtype, element_shape=element_shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
              _op.get_attr("element_shape"))
    _execute.record_gradient(
      "TensorArrayGatherV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first, falling back to the Python
    # slow path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayGatherV2", name, _ctx._post_execution_callbacks, handle,
        indices, flow_in, "dtype", dtype, "element_shape", element_shape)
      return _result
    except _core._FallbackException:
      return tensor_array_gather_v2_eager_fallback(
          handle, indices, flow_in, dtype=dtype, element_shape=element_shape,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5978 
5979 
def tensor_array_gather_v2_eager_fallback(handle, indices, flow_in, dtype, element_shape=None, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_gather_v2.

  Normalizes the attrs, converts the inputs to eager tensors, runs the op
  through the generic execute path and records the gradient.
  """
  eager_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.string),
            _ops.convert_to_tensor(indices, _dtypes.int32),
            _ops.convert_to_tensor(flow_in, _dtypes.float32)]
  attrs = ("dtype", dtype, "element_shape", element_shape)
  outputs = _execute.execute(b"TensorArrayGatherV2", 1, inputs=inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayGatherV2", inputs, attrs, outputs, name)
  value, = outputs
  return value
6000 
6001 
def tensor_array_gather_v3(handle, indices, flow_in, dtype, element_shape=None, name=None):
  r"""Gather specific elements from the TensorArray into output `value`.

  All elements selected by `indices` must have the same shape.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    indices: A `Tensor` of type `int32`.
      The locations in the TensorArray from which to read tensor elements.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    dtype: A `tf.DType`. The type of the elem that is returned.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      The expected shape of an element, if known. Used to
      validate the shapes of TensorArray elements. If this shape is not
      fully specified, gathering zero-size TensorArrays is an error.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  # Graph mode: build a TensorArrayGatherV3 node and record its gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # NOTE(review): no-op branch emitted by the code generator; the
    # parameter already defaults to None.
    if element_shape is None:
      element_shape = None
    element_shape = _execute.make_shape(element_shape, "element_shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayGatherV3", handle=handle, indices=indices,
        flow_in=flow_in, dtype=dtype, element_shape=element_shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
              _op.get_attr("element_shape"))
    _execute.record_gradient(
      "TensorArrayGatherV3", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first, falling back to the Python
    # slow path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayGatherV3", name, _ctx._post_execution_callbacks, handle,
        indices, flow_in, "dtype", dtype, "element_shape", element_shape)
      return _result
    except _core._FallbackException:
      return tensor_array_gather_v3_eager_fallback(
          handle, indices, flow_in, dtype=dtype, element_shape=element_shape,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6058 
6059 
def tensor_array_gather_v3_eager_fallback(handle, indices, flow_in, dtype, element_shape=None, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_gather_v3.

  Normalizes the attrs, converts the inputs to eager tensors, runs the op
  through the generic execute path and records the gradient.
  """
  eager_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource),
            _ops.convert_to_tensor(indices, _dtypes.int32),
            _ops.convert_to_tensor(flow_in, _dtypes.float32)]
  attrs = ("dtype", dtype, "element_shape", element_shape)
  outputs = _execute.execute(b"TensorArrayGatherV3", 1, inputs=inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayGatherV3", inputs, attrs, outputs, name)
  value, = outputs
  return value
6080 
6081 
def tensor_array_grad(handle, flow_in, source, name=None):
  r"""Create/lookup the gradient TensorArray for a (ref-typed) TensorArray.

  Graph mode only.

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    source: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.

  Raises:
    RuntimeError: If called while eager execution is enabled; the ref-typed
      `grad_handle` output has no eager representation.
  """
  _ctx = _context._context
  # Guard clause: the ref-typed output cannot be represented eagerly.
  if _ctx is not None and _ctx._eager_context.is_eager:
    raise RuntimeError("tensor_array_grad op does not support eager execution. Arg 'grad_handle' is a ref.")
  source = _execute.make_str(source, "source")
  _, _, _op = _op_def_lib._apply_op_helper(
      "TensorArrayGrad", handle=handle, flow_in=flow_in, source=source,
      name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("source", _op.get_attr("source"))
  _execute.record_gradient(
      "TensorArrayGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
6113 
def tensor_array_grad_v2(handle, flow_in, source, name=None):
  r"""Deprecated. Use TensorArrayGradV3

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    source: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context
  # Graph mode: build a TensorArrayGradV2 node and record its gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    source = _execute.make_str(source, "source")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayGradV2", handle=handle, flow_in=flow_in, source=source,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("source", _op.get_attr("source"))
    _execute.record_gradient(
      "TensorArrayGradV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first, falling back to the Python
    # slow path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayGradV2", name, _ctx._post_execution_callbacks, handle,
        flow_in, "source", source)
      return _result
    except _core._FallbackException:
      return tensor_array_grad_v2_eager_fallback(
          handle, flow_in, source=source, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6156 
6157 
def tensor_array_grad_v2_eager_fallback(handle, flow_in, source, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_grad_v2.

  Normalizes the attrs, converts the inputs to eager tensors, runs the op
  through the generic execute path and records the gradient.
  """
  eager_ctx = ctx or _context.context()
  source = _execute.make_str(source, "source")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.string),
            _ops.convert_to_tensor(flow_in, _dtypes.float32)]
  attrs = ("source", source)
  outputs = _execute.execute(b"TensorArrayGradV2", 1, inputs=inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayGradV2", inputs, attrs, outputs, name)
  grad_handle, = outputs
  return grad_handle
6174 
6175 
# Output names and result-tuple type for the TensorArrayGradV3 op.
_tensor_array_grad_v3_outputs = ["grad_handle", "flow_out"]
_TensorArrayGradV3Output = _collections.namedtuple(
    "TensorArrayGradV3", _tensor_array_grad_v3_outputs)
6179 
6180 
def tensor_array_grad_v3(handle, flow_in, source, name=None):
  r"""Creates a TensorArray for storing the gradients of values in the given handle.

  If the given TensorArray gradient already exists, returns a reference to it.

  Locks the size of the original TensorArray by disabling its dynamic size flag.

  **A note about the input flow_in:**

  The handle flow_in forces the execution of the gradient lookup to occur
  only after certain other operations have occurred.  For example, when
  the forward TensorArray is dynamically sized, writes to this TensorArray
  may resize the object.  The gradient TensorArray is statically sized based
  on the size of the forward TensorArray when this operation executes.
  Furthermore, the size of the forward TensorArray is frozen by this call.
  As a result, the flow is used to ensure that the call to generate the gradient
  TensorArray only happens after all writes are executed.

  In the case of dynamically sized TensorArrays, gradient computation should
  only be performed on read operations that have themselves been chained via
  flow to occur only after all writes have executed. That way the final size
  of the forward TensorArray is known when this operation is called.

  **A note about the source attribute:**

  TensorArray gradient calls use an accumulator TensorArray object.  If
  multiple gradients are calculated and run in the same session, the multiple
  gradient nodes may accidentally flow through the same accumulator TensorArray.
  This double counts and generally breaks the TensorArray gradient flow.

  The solution is to identify which gradient call this particular
  TensorArray gradient is being called in.  This is performed by identifying
  a unique string (e.g. "gradients", "gradients_1", ...) from the input
  gradient Tensor's name.  This string is used as a suffix when creating
  the TensorArray gradient object here (the attribute `source`).

  The attribute `source` is added as a suffix to the forward TensorArray's
  name when performing the creation / lookup, so that each separate gradient
  calculation gets its own TensorArray accumulator.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to the forward TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    source: A `string`.
      The gradient source string, used to decide which gradient TensorArray
      to return.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (grad_handle, flow_out).

    grad_handle: A `Tensor` of type `resource`.
    flow_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: build a TensorArrayGradV3 node and record its gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    source = _execute.make_str(source, "source")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayGradV3", handle=handle, flow_in=flow_in, source=source,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("source", _op.get_attr("source"))
    _execute.record_gradient(
      "TensorArrayGradV3", _inputs_flat, _attrs, _result, name)
    _result = _TensorArrayGradV3Output._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first, falling back to the Python
    # slow path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayGradV3", name, _ctx._post_execution_callbacks, handle,
        flow_in, "source", source)
      _result = _TensorArrayGradV3Output._make(_result)
      return _result
    except _core._FallbackException:
      return tensor_array_grad_v3_eager_fallback(
          handle, flow_in, source=source, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6268 
6269 
def tensor_array_grad_v3_eager_fallback(handle, flow_in, source, name=None, ctx=None):
  r"""Slow-path eager executor for tensor_array_grad_v3.

  Normalizes the attrs, converts the inputs to eager tensors, runs the op
  through the generic execute path and records the gradient.
  """
  eager_ctx = ctx or _context.context()
  source = _execute.make_str(source, "source")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource),
            _ops.convert_to_tensor(flow_in, _dtypes.float32)]
  attrs = ("source", source)
  outputs = _execute.execute(b"TensorArrayGradV3", 2, inputs=inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayGradV3", inputs, attrs, outputs, name)
  return _TensorArrayGradV3Output._make(outputs)
6286 
6287 
# Output names and result-tuple type for the TensorArrayGradWithShape op.
_tensor_array_grad_with_shape_outputs = ["grad_handle", "flow_out"]
_TensorArrayGradWithShapeOutput = _collections.namedtuple(
    "TensorArrayGradWithShape", _tensor_array_grad_with_shape_outputs)
6291 
6292 
def tensor_array_grad_with_shape(handle, flow_in, shape_to_prepend, source, name=None):
  r"""Creates a TensorArray for storing multiple gradients of values in the given handle.

  Similar to TensorArrayGradV3. However it creates an accumulator with an
  expanded shape compared to the input TensorArray whose gradient is being
  computed. This enables multiple gradients for the same TensorArray to be
  calculated using the same accumulator.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to the forward TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    shape_to_prepend: A `Tensor` of type `int32`.
      An int32 vector representing a shape. Elements in the gradient accumulator will
      have shape which is this shape_to_prepend value concatenated with shape of the
      elements in the TensorArray corresponding to the input handle.
    source: A `string`.
      The gradient source string, used to decide which gradient TensorArray
      to return.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (grad_handle, flow_out).

    grad_handle: A `Tensor` of type `resource`.
    flow_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    source = _execute.make_str(source, "source")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayGradWithShape", handle=handle, flow_in=flow_in,
        shape_to_prepend=shape_to_prepend, source=source, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("source", _op.get_attr("source"))
    _execute.record_gradient(
      "TensorArrayGradWithShape", _inputs_flat, _attrs, _result, name)
    _result = _TensorArrayGradWithShapeOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayGradWithShape", name, _ctx._post_execution_callbacks,
        handle, flow_in, shape_to_prepend, "source", source)
      _result = _TensorArrayGradWithShapeOutput._make(_result)
      return _result
    except _core._FallbackException:
      return tensor_array_grad_with_shape_eager_fallback(
          handle, flow_in, shape_to_prepend, source=source, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6353 
6354 
def tensor_array_grad_with_shape_eager_fallback(handle, flow_in, shape_to_prepend, source, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_grad_with_shape

  Builds the flattened inputs and attrs by hand and dispatches the op
  through the generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  # Validate/normalize the "source" attr to a string.
  source = _execute.make_str(source, "source")
  # Coerce inputs to the dtypes declared in the op definition.
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  shape_to_prepend = _ops.convert_to_tensor(shape_to_prepend, _dtypes.int32)
  _inputs_flat = [handle, flow_in, shape_to_prepend]
  _attrs = ("source", source)
  # TensorArrayGradWithShape produces two outputs (grad_handle, flow_out).
  _result = _execute.execute(b"TensorArrayGradWithShape", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "TensorArrayGradWithShape", _inputs_flat, _attrs, _result, name)
  _result = _TensorArrayGradWithShapeOutput._make(_result)
  return _result
6373 
6374 
def tensor_array_pack(handle, flow_in, dtype, element_shape=None, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If executed eagerly; the ref-typed `handle` argument is
      only representable in graph mode.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # make_shape accepts None and produces an unknown shape.
    element_shape = _execute.make_shape(element_shape, "element_shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayPack", handle=handle, flow_in=flow_in, dtype=dtype,
        element_shape=element_shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
              _op.get_attr("element_shape"))
    _execute.record_gradient(
      "TensorArrayPack", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-typed inputs cannot be represented as eager tensors.
    raise RuntimeError("tensor_array_pack op does not support eager execution. Arg 'handle' is a ref.")

6411 
def tensor_array_read(handle, index, flow_in, dtype, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    index: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If executed eagerly; the ref-typed `handle` argument is
      only representable in graph mode.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayRead", handle=handle, index=index, flow_in=flow_in,
        dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
      "TensorArrayRead", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-typed inputs cannot be represented as eager tensors.
    raise RuntimeError("tensor_array_read op does not support eager execution. Arg 'handle' is a ref.")

6444 
def tensor_array_read_v2(handle, index, flow_in, dtype, name=None):
  r"""Deprecated. Use TensorArrayReadV3

  Args:
    handle: A `Tensor` of type `string`.
    index: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayReadV2", handle=handle, index=index, flow_in=flow_in,
        dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
      "TensorArrayReadV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayReadV2", name, _ctx._post_execution_callbacks, handle,
        index, flow_in, "dtype", dtype)
      return _result
    except _core._FallbackException:
      return tensor_array_read_v2_eager_fallback(
          handle, index, flow_in, dtype=dtype, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6488 
6489 
def tensor_array_read_v2_eager_fallback(handle, index, flow_in, dtype, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_read_v2

  Builds the flattened inputs and attrs by hand and dispatches the op
  through the generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  # Coerce inputs to the dtypes declared in the op definition.
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, index, flow_in]
  _attrs = ("dtype", dtype)
  _result = _execute.execute(b"TensorArrayReadV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayReadV2", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
6507 
6508 
def tensor_array_read_v3(handle, index, flow_in, dtype, name=None):
  r"""Read an element from the TensorArray into output `value`.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    index: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    dtype: A `tf.DType`. The type of the elem that is returned.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayReadV3", handle=handle, index=index, flow_in=flow_in,
        dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
      "TensorArrayReadV3", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayReadV3", name, _ctx._post_execution_callbacks, handle,
        index, flow_in, "dtype", dtype)
      return _result
    except _core._FallbackException:
      return tensor_array_read_v3_eager_fallback(
          handle, index, flow_in, dtype=dtype, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6553 
6554 
def tensor_array_read_v3_eager_fallback(handle, index, flow_in, dtype, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_read_v3

  Builds the flattened inputs and attrs by hand and dispatches the op
  through the generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  # Coerce inputs to the dtypes declared in the op definition.
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, index, flow_in]
  _attrs = ("dtype", dtype)
  _result = _execute.execute(b"TensorArrayReadV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayReadV3", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
6572 
6573 
def tensor_array_scatter(handle, indices, value, flow_in, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    indices: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.

  Raises:
    RuntimeError: If executed eagerly; the ref-typed `handle` argument is
      only representable in graph mode.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayScatter", handle=handle, indices=indices, value=value,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArrayScatter", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-typed inputs cannot be represented as eager tensors.
    raise RuntimeError("tensor_array_scatter op does not support eager execution. Arg 'handle' is a ref.")

6605 
def tensor_array_scatter_v2(handle, indices, value, flow_in, name=None):
  r"""Deprecated. Use TensorArrayScatterV3

  Args:
    handle: A `Tensor` of type `string`.
    indices: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayScatterV2", handle=handle, indices=indices, value=value,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArrayScatterV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayScatterV2", name, _ctx._post_execution_callbacks, handle,
        indices, value, flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_scatter_v2_eager_fallback(
          handle, indices, value, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6648 
6649 
def tensor_array_scatter_v2_eager_fallback(handle, indices, value, flow_in, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_scatter_v2

  Builds the flattened inputs and attrs by hand and dispatches the op
  through the generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the value tensor's dtype.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  # Coerce the remaining inputs to their declared dtypes.
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, indices, value, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArrayScatterV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayScatterV2", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
6667 
6668 
def tensor_array_scatter_v3(handle, indices, value, flow_in, name=None):
  r"""Scatter the data from the input value into specific TensorArray elements.

  `indices` must be a vector, its length must match the first dim of `value`.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    indices: A `Tensor` of type `int32`.
      The locations at which to write the tensor elements.
    value: A `Tensor`. The concatenated tensor to write to the TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayScatterV3", handle=handle, indices=indices, value=value,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArrayScatterV3", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayScatterV3", name, _ctx._post_execution_callbacks, handle,
        indices, value, flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_scatter_v3_eager_fallback(
          handle, indices, value, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6715 
6716 
def tensor_array_scatter_v3_eager_fallback(handle, indices, value, flow_in, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_scatter_v3

  Builds the flattened inputs and attrs by hand and dispatches the op
  through the generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the value tensor's dtype.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  # Coerce the remaining inputs to their declared dtypes.
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, indices, value, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArrayScatterV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayScatterV3", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
6734 
6735 
def tensor_array_size(handle, flow_in, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.

  Raises:
    RuntimeError: If executed eagerly; the ref-typed `handle` argument is
      only representable in graph mode.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArraySize", handle=handle, flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "TensorArraySize", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-typed inputs cannot be represented as eager tensors.
    raise RuntimeError("tensor_array_size op does not support eager execution. Arg 'handle' is a ref.")

6764 
def tensor_array_size_v2(handle, flow_in, name=None):
  r"""Deprecated. Use TensorArraySizeV3

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArraySizeV2", handle=handle, flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "TensorArraySizeV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArraySizeV2", name, _ctx._post_execution_callbacks, handle,
        flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_size_v2_eager_fallback(
          handle, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6804 
6805 
def tensor_array_size_v2_eager_fallback(handle, flow_in, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_size_v2

  Builds the flattened inputs by hand and dispatches the op through the
  generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce inputs to the dtypes declared in the op definition.
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, flow_in]
  # TensorArraySizeV2 has no attrs.
  _attrs = None
  _result = _execute.execute(b"TensorArraySizeV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArraySizeV2", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
6821 
6822 
def tensor_array_size_v3(handle, flow_in, name=None):
  r"""Get the current size of the TensorArray.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArraySizeV3", handle=handle, flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "TensorArraySizeV3", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArraySizeV3", name, _ctx._post_execution_callbacks, handle,
        flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_size_v3_eager_fallback(
          handle, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6864 
6865 
def tensor_array_size_v3_eager_fallback(handle, flow_in, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_size_v3

  Builds the flattened inputs by hand and dispatches the op through the
  generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce inputs to the dtypes declared in the op definition.
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, flow_in]
  # TensorArraySizeV3 has no attrs.
  _attrs = None
  _result = _execute.execute(b"TensorArraySizeV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArraySizeV3", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
6881 
6882 
def tensor_array_split(handle, value, lengths, flow_in, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    value: A `Tensor`.
    lengths: A `Tensor` of type `int64`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.

  Raises:
    RuntimeError: If executed eagerly; the ref-typed `handle` argument is
      only representable in graph mode.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArraySplit", handle=handle, value=value, lengths=lengths,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArraySplit", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-typed inputs cannot be represented as eager tensors.
    raise RuntimeError("tensor_array_split op does not support eager execution. Arg 'handle' is a ref.")

6914 
def tensor_array_split_v2(handle, value, lengths, flow_in, name=None):
  r"""Deprecated. Use TensorArraySplitV3

  Args:
    handle: A `Tensor` of type `string`.
    value: A `Tensor`.
    lengths: A `Tensor` of type `int64`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArraySplitV2", handle=handle, value=value, lengths=lengths,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArraySplitV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArraySplitV2", name, _ctx._post_execution_callbacks, handle,
        value, lengths, flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_split_v2_eager_fallback(
          handle, value, lengths, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6957 
6958 
def tensor_array_split_v2_eager_fallback(handle, value, lengths, flow_in, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_split_v2

  Builds the flattened inputs and attrs by hand and dispatches the op
  through the generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the value tensor's dtype.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  # Coerce the remaining inputs to their declared dtypes.
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  lengths = _ops.convert_to_tensor(lengths, _dtypes.int64)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, value, lengths, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArraySplitV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArraySplitV2", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
6976 
6977 
def tensor_array_split_v3(handle, value, lengths, flow_in, name=None):
  r"""Split the data from the input value into TensorArray elements.

  Assuming that `lengths` takes on values

    ```(n0, n1, ..., n(T-1))```

  and that `value` has shape

    ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,

  this splits values into a TensorArray with T tensors.

  TensorArray index t will be the subtensor of values with starting position

    ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

  and having size

    ```nt x d0 x d1 x ...```

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    value: A `Tensor`. The concatenated tensor to write to the TensorArray.
    lengths: A `Tensor` of type `int64`.
      The vector of lengths, how to split the rows of value into the
      TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArraySplitV3", handle=handle, value=value, lengths=lengths,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArraySplitV3", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArraySplitV3", name, _ctx._post_execution_callbacks, handle,
        value, lengths, flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_split_v3_eager_fallback(
          handle, value, lengths, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface eager runtime failures as the canonical TF exception type,
      # appending the op name (if any) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7041 
7042 
def tensor_array_split_v3_eager_fallback(handle, value, lengths, flow_in, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_array_split_v3

  Builds the flattened inputs and attrs by hand and dispatches the op
  through the generic eager executor instead of the C fast path.
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the value tensor's dtype.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  # Coerce the remaining inputs to their declared dtypes.
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  lengths = _ops.convert_to_tensor(lengths, _dtypes.int64)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, value, lengths, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArraySplitV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorArraySplitV3", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
7060 
7061 
def tensor_array_unpack(handle, value, flow_in, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.

  Raises:
    RuntimeError: If executed eagerly; the ref-typed `handle` argument is
      only representable in graph mode.
  """
  _ctx = _context._context
  # Graph mode (or no eager context yet): build the op via the op-def
  # library and record the gradient for autodiff.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayUnpack", handle=handle, value=value, flow_in=flow_in,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArrayUnpack", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Ref-typed inputs cannot be represented as eager tensors.
    raise RuntimeError("tensor_array_unpack op does not support eager execution. Arg 'handle' is a ref.")

7092 
def tensor_array_v2(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, tensor_array_name="", name=None):
  r"""Deprecated. Use TensorArrayV3

  Args:
    size: A `Tensor` of type `int32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    dynamic_size: An optional `bool`. Defaults to `False`.
    clear_after_read: An optional `bool`. Defaults to `True`.
    tensor_array_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context
  # Graph mode (no context yet, or eager not enabled): canonicalize the
  # attributes, build the op via the op-def library, and record the
  # inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # Generated no-op: the code generator emits the same default-filling
    # pattern for every attribute, even when the default is None.
    if element_shape is None:
      element_shape = None
    element_shape = _execute.make_shape(element_shape, "element_shape")
    if dynamic_size is None:
      dynamic_size = False
    dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
    if clear_after_read is None:
      clear_after_read = True
    clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
    if tensor_array_name is None:
      tensor_array_name = ""
    tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayV2", size=size, dtype=dtype, element_shape=element_shape,
        dynamic_size=dynamic_size, clear_after_read=clear_after_read,
        tensor_array_name=tensor_array_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attr values are read back from the created op so the gradient record
    # sees the canonicalized values.
    _attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
              _op.get_attr("element_shape"), "dynamic_size",
              _op.get_attr("dynamic_size"), "clear_after_read",
              _op.get_attr("clear_after_read"), "tensor_array_name",
              _op.get_attr("tensor_array_name"))
    _execute.record_gradient(
      "TensorArrayV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when it raises _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayV2", name, _ctx._post_execution_callbacks, size, "dtype",
        dtype, "element_shape", element_shape, "dynamic_size", dynamic_size,
        "clear_after_read", clear_after_read, "tensor_array_name",
        tensor_array_name)
      return _result
    except _core._FallbackException:
      return tensor_array_v2_eager_fallback(
          size, dtype=dtype, element_shape=element_shape,
          dynamic_size=dynamic_size, clear_after_read=clear_after_read,
          tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Attach the op name to the error message for easier debugging, then
      # re-raise as the corresponding TensorFlow error type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7159 
7160 
def tensor_array_v2_eager_fallback(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, tensor_array_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of tensor_array_v2.

  Canonicalizes every attribute, converts `size` to an int32 tensor, and
  dispatches the TensorArrayV2 op through the generic execute path.
  """
  run_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
  dynamic_size = _execute.make_bool(
      False if dynamic_size is None else dynamic_size, "dynamic_size")
  clear_after_read = _execute.make_bool(
      True if clear_after_read is None else clear_after_read,
      "clear_after_read")
  tensor_array_name = _execute.make_str(
      "" if tensor_array_name is None else tensor_array_name,
      "tensor_array_name")
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  flat_inputs = [size]
  op_attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
              dynamic_size, "clear_after_read", clear_after_read,
              "tensor_array_name", tensor_array_name)
  outputs = _execute.execute(b"TensorArrayV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayV2", flat_inputs, op_attrs, outputs, name)
  handle_out, = outputs
  return handle_out
7190 
7191 
# Names of the two outputs of the TensorArrayV3 op, in emission order.
_tensor_array_v3_outputs = ["handle", "flow"]
# Structured result type so callers can access the op's outputs as
# `.handle` (a resource tensor) and `.flow` (a float32 tensor).
_TensorArrayV3Output = _collections.namedtuple(
    "TensorArrayV3", _tensor_array_v3_outputs)
7195 
7196 
def tensor_array_v3(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, identical_element_shapes=False, tensor_array_name="", name=None):
  r"""An array of Tensors of given size.

  Write data via Write and read via Read or Pack.

  Args:
    size: A `Tensor` of type `int32`. The size of the array.
    dtype: A `tf.DType`. The type of the elements on the tensor_array.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      The expected shape of an element, if known. Used to
      validate the shapes of TensorArray elements. If this shape is not
      fully specified, gathering zero-size TensorArrays is an error.
    dynamic_size: An optional `bool`. Defaults to `False`.
      A boolean that determines whether writes to the TensorArray
      are allowed to grow the size.  By default, this is not allowed.
    clear_after_read: An optional `bool`. Defaults to `True`.
      If true (default), Tensors in the TensorArray are cleared
      after being read.  This disables multiple read semantics but allows early
      release of memory.
    identical_element_shapes: An optional `bool`. Defaults to `False`.
      If true (default is false), then all
      elements in the TensorArray will be expected to have have identical shapes.
      This allows certain behaviors, like dynamically checking for
      consistent shapes on write, and being able to fill in properly
      shaped zero tensors on stack -- even if the element_shape attribute
      is not fully defined.
    tensor_array_name: An optional `string`. Defaults to `""`.
      Overrides the name used for the temporary tensor_array
      resource. Default value is the name of the 'TensorArray' op (which
      is guaranteed unique).
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (handle, flow).

    handle: A `Tensor` of type `resource`.
    flow: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: canonicalize attributes, build the op via the op-def
  # library, and record inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    # Generated no-op: the generator emits the same default-filling
    # pattern for every attribute, even when the default is None.
    if element_shape is None:
      element_shape = None
    element_shape = _execute.make_shape(element_shape, "element_shape")
    if dynamic_size is None:
      dynamic_size = False
    dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
    if clear_after_read is None:
      clear_after_read = True
    clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
    if identical_element_shapes is None:
      identical_element_shapes = False
    identical_element_shapes = _execute.make_bool(identical_element_shapes, "identical_element_shapes")
    if tensor_array_name is None:
      tensor_array_name = ""
    tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayV3", size=size, dtype=dtype, element_shape=element_shape,
        dynamic_size=dynamic_size, clear_after_read=clear_after_read,
        identical_element_shapes=identical_element_shapes,
        tensor_array_name=tensor_array_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are read back from the created op so the gradient record sees
    # the canonicalized values.
    _attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
              _op.get_attr("element_shape"), "dynamic_size",
              _op.get_attr("dynamic_size"), "clear_after_read",
              _op.get_attr("clear_after_read"), "identical_element_shapes",
              _op.get_attr("identical_element_shapes"), "tensor_array_name",
              _op.get_attr("tensor_array_name"))
    _execute.record_gradient(
      "TensorArrayV3", _inputs_flat, _attrs, _result, name)
    # Two outputs (handle, flow) are wrapped in a namedtuple.
    _result = _TensorArrayV3Output._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when it raises _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayV3", name, _ctx._post_execution_callbacks, size, "dtype",
        dtype, "element_shape", element_shape, "dynamic_size", dynamic_size,
        "clear_after_read", clear_after_read, "identical_element_shapes",
        identical_element_shapes, "tensor_array_name", tensor_array_name)
      _result = _TensorArrayV3Output._make(_result)
      return _result
    except _core._FallbackException:
      return tensor_array_v3_eager_fallback(
          size, dtype=dtype, element_shape=element_shape,
          dynamic_size=dynamic_size, clear_after_read=clear_after_read,
          identical_element_shapes=identical_element_shapes,
          tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Attach the op name to the error message, then re-raise as the
      # corresponding TensorFlow error type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7293 
7294 
def tensor_array_v3_eager_fallback(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, identical_element_shapes=False, tensor_array_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of tensor_array_v3.

  Canonicalizes all attributes, converts `size` to an int32 tensor, runs
  the TensorArrayV3 op via the generic execute path, and packages the two
  outputs (handle, flow) into a `_TensorArrayV3Output` namedtuple.
  """
  run_ctx = ctx or _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
  dynamic_size = _execute.make_bool(
      False if dynamic_size is None else dynamic_size, "dynamic_size")
  clear_after_read = _execute.make_bool(
      True if clear_after_read is None else clear_after_read,
      "clear_after_read")
  identical_element_shapes = _execute.make_bool(
      False if identical_element_shapes is None else identical_element_shapes,
      "identical_element_shapes")
  tensor_array_name = _execute.make_str(
      "" if tensor_array_name is None else tensor_array_name,
      "tensor_array_name")
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  flat_inputs = [size]
  op_attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
              dynamic_size, "clear_after_read", clear_after_read,
              "identical_element_shapes", identical_element_shapes,
              "tensor_array_name", tensor_array_name)
  outputs = _execute.execute(b"TensorArrayV3", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayV3", flat_inputs, op_attrs, outputs, name)
  return _TensorArrayV3Output._make(outputs)
7328 
7329 
def tensor_array_write(handle, index, value, flow_in, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    index: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.

  Raises:
    RuntimeError: If called while eager execution is enabled; 'handle' is
      a ref-typed input and refs cannot be represented as eager tensors.
  """
  _ctx = _context._context
  # Graph mode: build the op through the op-def library and record the
  # inputs/attrs so gradients can be computed.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayWrite", handle=handle, index=index, value=value,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArrayWrite", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  # Eager mode is unsupported for this op. (The generated original also
  # contained a second, unreachable copy of this raise after the if/else;
  # that dead code has been removed.)
  raise RuntimeError("tensor_array_write op does not support eager execution. Arg 'handle' is a ref.")
7361 
def tensor_array_write_v2(handle, index, value, flow_in, name=None):
  r"""Deprecated. Use TensorArrayGradV3

  Args:
    handle: A `Tensor` of type `string`.
    index: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: build the op via the op-def library and record the
  # inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayWriteV2", handle=handle, index=index, value=value,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArrayWriteV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when it raises _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayWriteV2", name, _ctx._post_execution_callbacks, handle,
        index, value, flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_write_v2_eager_fallback(
          handle, index, value, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Attach the op name to the error message, then re-raise as the
      # corresponding TensorFlow error type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7404 
7405 
def tensor_array_write_v2_eager_fallback(handle, index, value, flow_in, name=None, ctx=None):
  r"""Slow-path eager implementation of tensor_array_write_v2.

  Resolves the `T` attr from `value`, converts the fixed-dtype inputs to
  tensors, and dispatches TensorArrayWriteV2 via the generic execute path.
  """
  run_ctx = ctx or _context.context()
  _attr_T, (value,) = _execute.args_to_matching_eager([value], run_ctx)
  # Inputs with fixed dtypes are converted explicitly.
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  flat_inputs = [handle, index, value, flow_in]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"TensorArrayWriteV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayWriteV2", flat_inputs, op_attrs, outputs, name)
  flow_out, = outputs
  return flow_out
7423 
7424 
def tensor_array_write_v3(handle, index, value, flow_in, name=None):
  r"""Push an element onto the tensor_array.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    index: A `Tensor` of type `int32`.
      The position to write to inside the TensorArray.
    value: A `Tensor`. The tensor to write to the TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: build the op via the op-def library and record the
  # inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorArrayWriteV3", handle=handle, index=index, value=value,
        flow_in=flow_in, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "TensorArrayWriteV3", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when it raises _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TensorArrayWriteV3", name, _ctx._post_execution_callbacks, handle,
        index, value, flow_in)
      return _result
    except _core._FallbackException:
      return tensor_array_write_v3_eager_fallback(
          handle, index, value, flow_in, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Attach the op name to the error message, then re-raise as the
      # corresponding TensorFlow error type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7469 
7470 
def tensor_array_write_v3_eager_fallback(handle, index, value, flow_in, name=None, ctx=None):
  r"""Slow-path eager implementation of tensor_array_write_v3.

  Resolves the `T` attr from `value`, converts the fixed-dtype inputs to
  tensors, and dispatches TensorArrayWriteV3 via the generic execute path.
  """
  run_ctx = ctx or _context.context()
  _attr_T, (value,) = _execute.args_to_matching_eager([value], run_ctx)
  # Inputs with fixed dtypes are converted explicitly.
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  flat_inputs = [handle, index, value, flow_in]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"TensorArrayWriteV3", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "TensorArrayWriteV3", flat_inputs, op_attrs, outputs, name)
  flow_out, = outputs
  return flow_out
7488 
7489 
def unstage(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op is similar to a lightweight Dequeue.

  The basic functionality is similar to dequeue with many fewer
  capabilities and options.  This Op is optimized for performance.

  Args:
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context
  # Graph mode: validate/canonicalize attributes, build the op, and record
  # inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(dtypes, (list, tuple)):
      raise TypeError(
          "Expected list for 'dtypes' argument to "
          "'unstage' Op, not %r." % dtypes)
    dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
    if capacity is None:
      capacity = 0
    capacity = _execute.make_int(capacity, "capacity")
    if memory_limit is None:
      memory_limit = 0
    memory_limit = _execute.make_int(memory_limit, "memory_limit")
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Unstage", dtypes=dtypes, capacity=capacity,
        memory_limit=memory_limit, container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    # Generated guard: if the op produced no output tensors, return the
    # Operation itself. (Unstage declares `dtypes` of length >= 1, so this
    # branch is defensive.)
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    # Attrs are read back from the created op so the gradient record sees
    # the canonicalized values.
    _attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
              _op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "Unstage", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    # Eager mode: try the C fast path; fall back to the Python slow path
    # when it raises _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Unstage",
        name, _ctx._post_execution_callbacks, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      return unstage_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Attach the op name to the error message, then re-raise as the
      # corresponding TensorFlow error type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7560 
7561 
def unstage_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
  r"""Slow-path eager implementation of unstage.

  Validates and canonicalizes the attributes, then runs the Unstage op
  (which takes no tensor inputs) through the generic execute path and
  returns the list of dequeued tensors.
  """
  run_ctx = ctx or _context.context()
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  capacity = _execute.make_int(
      0 if capacity is None else capacity, "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str(
      "" if container is None else container, "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  flat_inputs = []
  op_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
              dtypes, "container", container, "shared_name", shared_name)
  # One output per requested dtype.
  outputs = _execute.execute(b"Unstage", len(dtypes), inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "Unstage", flat_inputs, op_attrs, outputs, name)
  return outputs
7592 
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList proto, register it, and build an OpDefLibrary.

  Args:
    op_list_proto_bytes: Serialized `OpList` protocol buffer bytes.

  Returns:
    An `OpDefLibrary` populated with the parsed op definitions.
  """
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
7600 # op {
7601 #   name: "AccumulatorApplyGradient"
7602 #   input_arg {
7603 #     name: "handle"
7604 #     type: DT_STRING
7605 #     is_ref: true
7606 #   }
7607 #   input_arg {
7608 #     name: "local_step"
7609 #     type: DT_INT64
7610 #   }
7611 #   input_arg {
7612 #     name: "gradient"
7613 #     type_attr: "dtype"
7614 #   }
7615 #   attr {
7616 #     name: "dtype"
7617 #     type: "type"
7618 #     allowed_values {
7619 #       list {
7620 #         type: DT_FLOAT
7621 #         type: DT_DOUBLE
7622 #         type: DT_INT32
7623 #         type: DT_UINT8
7624 #         type: DT_INT16
7625 #         type: DT_INT8
7626 #         type: DT_COMPLEX64
7627 #         type: DT_INT64
7628 #         type: DT_QINT8
7629 #         type: DT_QUINT8
7630 #         type: DT_QINT32
7631 #         type: DT_BFLOAT16
7632 #         type: DT_UINT16
7633 #         type: DT_COMPLEX128
7634 #         type: DT_HALF
7635 #         type: DT_UINT32
7636 #         type: DT_UINT64
7637 #       }
7638 #     }
7639 #   }
7640 # }
7641 # op {
7642 #   name: "AccumulatorNumAccumulated"
7643 #   input_arg {
7644 #     name: "handle"
7645 #     type: DT_STRING
7646 #     is_ref: true
7647 #   }
7648 #   output_arg {
7649 #     name: "num_accumulated"
7650 #     type: DT_INT32
7651 #   }
7652 # }
7653 # op {
7654 #   name: "AccumulatorSetGlobalStep"
7655 #   input_arg {
7656 #     name: "handle"
7657 #     type: DT_STRING
7658 #     is_ref: true
7659 #   }
7660 #   input_arg {
7661 #     name: "new_global_step"
7662 #     type: DT_INT64
7663 #   }
7664 # }
7665 # op {
7666 #   name: "AccumulatorTakeGradient"
7667 #   input_arg {
7668 #     name: "handle"
7669 #     type: DT_STRING
7670 #     is_ref: true
7671 #   }
7672 #   input_arg {
7673 #     name: "num_required"
7674 #     type: DT_INT32
7675 #   }
7676 #   output_arg {
7677 #     name: "average"
7678 #     type_attr: "dtype"
7679 #   }
7680 #   attr {
7681 #     name: "dtype"
7682 #     type: "type"
7683 #     allowed_values {
7684 #       list {
7685 #         type: DT_FLOAT
7686 #         type: DT_DOUBLE
7687 #         type: DT_INT32
7688 #         type: DT_UINT8
7689 #         type: DT_INT16
7690 #         type: DT_INT8
7691 #         type: DT_COMPLEX64
7692 #         type: DT_INT64
7693 #         type: DT_QINT8
7694 #         type: DT_QUINT8
7695 #         type: DT_QINT32
7696 #         type: DT_BFLOAT16
7697 #         type: DT_UINT16
7698 #         type: DT_COMPLEX128
7699 #         type: DT_HALF
7700 #         type: DT_UINT32
7701 #         type: DT_UINT64
7702 #       }
7703 #     }
7704 #   }
7705 # }
7706 # op {
7707 #   name: "Barrier"
7708 #   output_arg {
7709 #     name: "handle"
7710 #     type: DT_STRING
7711 #     is_ref: true
7712 #   }
7713 #   attr {
7714 #     name: "component_types"
7715 #     type: "list(type)"
7716 #     has_minimum: true
7717 #     minimum: 1
7718 #   }
7719 #   attr {
7720 #     name: "shapes"
7721 #     type: "list(shape)"
7722 #     default_value {
7723 #       list {
7724 #       }
7725 #     }
7726 #     has_minimum: true
7727 #   }
7728 #   attr {
7729 #     name: "capacity"
7730 #     type: "int"
7731 #     default_value {
7732 #       i: -1
7733 #     }
7734 #   }
7735 #   attr {
7736 #     name: "container"
7737 #     type: "string"
7738 #     default_value {
7739 #       s: ""
7740 #     }
7741 #   }
7742 #   attr {
7743 #     name: "shared_name"
7744 #     type: "string"
7745 #     default_value {
7746 #       s: ""
7747 #     }
7748 #   }
7749 #   is_stateful: true
7750 # }
7751 # op {
7752 #   name: "BarrierClose"
7753 #   input_arg {
7754 #     name: "handle"
7755 #     type: DT_STRING
7756 #     is_ref: true
7757 #   }
7758 #   attr {
7759 #     name: "cancel_pending_enqueues"
7760 #     type: "bool"
7761 #     default_value {
7762 #       b: false
7763 #     }
7764 #   }
7765 # }
7766 # op {
7767 #   name: "BarrierIncompleteSize"
7768 #   input_arg {
7769 #     name: "handle"
7770 #     type: DT_STRING
7771 #     is_ref: true
7772 #   }
7773 #   output_arg {
7774 #     name: "size"
7775 #     type: DT_INT32
7776 #   }
7777 # }
7778 # op {
7779 #   name: "BarrierInsertMany"
7780 #   input_arg {
7781 #     name: "handle"
7782 #     type: DT_STRING
7783 #     is_ref: true
7784 #   }
7785 #   input_arg {
7786 #     name: "keys"
7787 #     type: DT_STRING
7788 #   }
7789 #   input_arg {
7790 #     name: "values"
7791 #     type_attr: "T"
7792 #   }
7793 #   attr {
7794 #     name: "T"
7795 #     type: "type"
7796 #   }
7797 #   attr {
7798 #     name: "component_index"
7799 #     type: "int"
7800 #   }
7801 # }
7802 # op {
7803 #   name: "BarrierReadySize"
7804 #   input_arg {
7805 #     name: "handle"
7806 #     type: DT_STRING
7807 #     is_ref: true
7808 #   }
7809 #   output_arg {
7810 #     name: "size"
7811 #     type: DT_INT32
7812 #   }
7813 # }
7814 # op {
7815 #   name: "BarrierTakeMany"
7816 #   input_arg {
7817 #     name: "handle"
7818 #     type: DT_STRING
7819 #     is_ref: true
7820 #   }
7821 #   input_arg {
7822 #     name: "num_elements"
7823 #     type: DT_INT32
7824 #   }
7825 #   output_arg {
7826 #     name: "indices"
7827 #     type: DT_INT64
7828 #   }
7829 #   output_arg {
7830 #     name: "keys"
7831 #     type: DT_STRING
7832 #   }
7833 #   output_arg {
7834 #     name: "values"
7835 #     type_list_attr: "component_types"
7836 #   }
7837 #   attr {
7838 #     name: "component_types"
7839 #     type: "list(type)"
7840 #     has_minimum: true
7841 #     minimum: 1
7842 #   }
7843 #   attr {
7844 #     name: "allow_small_batch"
7845 #     type: "bool"
7846 #     default_value {
7847 #       b: false
7848 #     }
7849 #   }
7850 #   attr {
7851 #     name: "wait_for_incomplete"
7852 #     type: "bool"
7853 #     default_value {
7854 #       b: false
7855 #     }
7856 #   }
7857 #   attr {
7858 #     name: "timeout_ms"
7859 #     type: "int"
7860 #     default_value {
7861 #       i: -1
7862 #     }
7863 #   }
7864 # }
7865 # op {
7866 #   name: "ConditionalAccumulator"
7867 #   output_arg {
7868 #     name: "handle"
7869 #     type: DT_STRING
7870 #     is_ref: true
7871 #   }
7872 #   attr {
7873 #     name: "dtype"
7874 #     type: "type"
7875 #     allowed_values {
7876 #       list {
7877 #         type: DT_FLOAT
7878 #         type: DT_DOUBLE
7879 #         type: DT_INT32
7880 #         type: DT_UINT8
7881 #         type: DT_INT16
7882 #         type: DT_INT8
7883 #         type: DT_COMPLEX64
7884 #         type: DT_INT64
7885 #         type: DT_QINT8
7886 #         type: DT_QUINT8
7887 #         type: DT_QINT32
7888 #         type: DT_BFLOAT16
7889 #         type: DT_UINT16
7890 #         type: DT_COMPLEX128
7891 #         type: DT_HALF
7892 #         type: DT_UINT32
7893 #         type: DT_UINT64
7894 #       }
7895 #     }
7896 #   }
7897 #   attr {
7898 #     name: "shape"
7899 #     type: "shape"
7900 #   }
7901 #   attr {
7902 #     name: "container"
7903 #     type: "string"
7904 #     default_value {
7905 #       s: ""
7906 #     }
7907 #   }
7908 #   attr {
7909 #     name: "shared_name"
7910 #     type: "string"
7911 #     default_value {
7912 #       s: ""
7913 #     }
7914 #   }
7915 #   attr {
7916 #     name: "reduction_type"
7917 #     type: "string"
7918 #     default_value {
7919 #       s: "MEAN"
7920 #     }
7921 #     allowed_values {
7922 #       list {
7923 #         s: "MEAN"
7924 #         s: "SUM"
7925 #       }
7926 #     }
7927 #   }
7928 #   is_stateful: true
7929 # }
7930 # op {
7931 #   name: "DeleteSessionTensor"
7932 #   input_arg {
7933 #     name: "handle"
7934 #     type: DT_STRING
7935 #   }
7936 #   is_stateful: true
7937 # }
7938 # op {
7939 #   name: "DynamicPartition"
7940 #   input_arg {
7941 #     name: "data"
7942 #     type_attr: "T"
7943 #   }
7944 #   input_arg {
7945 #     name: "partitions"
7946 #     type: DT_INT32
7947 #   }
7948 #   output_arg {
7949 #     name: "outputs"
7950 #     type_attr: "T"
7951 #     number_attr: "num_partitions"
7952 #   }
7953 #   attr {
7954 #     name: "num_partitions"
7955 #     type: "int"
7956 #     has_minimum: true
7957 #     minimum: 1
7958 #   }
7959 #   attr {
7960 #     name: "T"
7961 #     type: "type"
7962 #   }
7963 # }
7964 # op {
7965 #   name: "DynamicStitch"
7966 #   input_arg {
7967 #     name: "indices"
7968 #     type: DT_INT32
7969 #     number_attr: "N"
7970 #   }
7971 #   input_arg {
7972 #     name: "data"
7973 #     type_attr: "T"
7974 #     number_attr: "N"
7975 #   }
7976 #   output_arg {
7977 #     name: "merged"
7978 #     type_attr: "T"
7979 #   }
7980 #   attr {
7981 #     name: "N"
7982 #     type: "int"
7983 #     has_minimum: true
7984 #     minimum: 1
7985 #   }
7986 #   attr {
7987 #     name: "T"
7988 #     type: "type"
7989 #   }
7990 # }
7991 # op {
7992 #   name: "FIFOQueue"
7993 #   output_arg {
7994 #     name: "handle"
7995 #     type: DT_STRING
7996 #     is_ref: true
7997 #   }
7998 #   attr {
7999 #     name: "component_types"
8000 #     type: "list(type)"
8001 #     has_minimum: true
8002 #     minimum: 1
8003 #   }
8004 #   attr {
8005 #     name: "shapes"
8006 #     type: "list(shape)"
8007 #     default_value {
8008 #       list {
8009 #       }
8010 #     }
8011 #     has_minimum: true
8012 #   }
8013 #   attr {
8014 #     name: "capacity"
8015 #     type: "int"
8016 #     default_value {
8017 #       i: -1
8018 #     }
8019 #   }
8020 #   attr {
8021 #     name: "container"
8022 #     type: "string"
8023 #     default_value {
8024 #       s: ""
8025 #     }
8026 #   }
8027 #   attr {
8028 #     name: "shared_name"
8029 #     type: "string"
8030 #     default_value {
8031 #       s: ""
8032 #     }
8033 #   }
8034 #   is_stateful: true
8035 # }
8036 # op {
8037 #   name: "FIFOQueueV2"
8038 #   output_arg {
8039 #     name: "handle"
8040 #     type: DT_RESOURCE
8041 #   }
8042 #   attr {
8043 #     name: "component_types"
8044 #     type: "list(type)"
8045 #     has_minimum: true
8046 #     minimum: 1
8047 #   }
8048 #   attr {
8049 #     name: "shapes"
8050 #     type: "list(shape)"
8051 #     default_value {
8052 #       list {
8053 #       }
8054 #     }
8055 #     has_minimum: true
8056 #   }
8057 #   attr {
8058 #     name: "capacity"
8059 #     type: "int"
8060 #     default_value {
8061 #       i: -1
8062 #     }
8063 #   }
8064 #   attr {
8065 #     name: "container"
8066 #     type: "string"
8067 #     default_value {
8068 #       s: ""
8069 #     }
8070 #   }
8071 #   attr {
8072 #     name: "shared_name"
8073 #     type: "string"
8074 #     default_value {
8075 #       s: ""
8076 #     }
8077 #   }
8078 #   is_stateful: true
8079 # }
8080 # op {
8081 #   name: "FakeQueue"
8082 #   input_arg {
8083 #     name: "resource"
8084 #     type: DT_RESOURCE
8085 #   }
8086 #   output_arg {
8087 #     name: "handle"
8088 #     type: DT_STRING
8089 #     is_ref: true
8090 #   }
8091 #   is_stateful: true
8092 # }
8093 # op {
8094 #   name: "GetSessionHandle"
8095 #   input_arg {
8096 #     name: "value"
8097 #     type_attr: "T"
8098 #   }
8099 #   output_arg {
8100 #     name: "handle"
8101 #     type: DT_STRING
8102 #   }
8103 #   attr {
8104 #     name: "T"
8105 #     type: "type"
8106 #   }
8107 #   is_stateful: true
8108 # }
8109 # op {
8110 #   name: "GetSessionHandleV2"
8111 #   input_arg {
8112 #     name: "value"
8113 #     type_attr: "T"
8114 #   }
8115 #   output_arg {
8116 #     name: "handle"
8117 #     type: DT_RESOURCE
8118 #   }
8119 #   attr {
8120 #     name: "T"
8121 #     type: "type"
8122 #   }
8123 #   is_stateful: true
8124 # }
8125 # op {
8126 #   name: "GetSessionTensor"
8127 #   input_arg {
8128 #     name: "handle"
8129 #     type: DT_STRING
8130 #   }
8131 #   output_arg {
8132 #     name: "value"
8133 #     type_attr: "dtype"
8134 #   }
8135 #   attr {
8136 #     name: "dtype"
8137 #     type: "type"
8138 #   }
8139 #   is_stateful: true
8140 # }
8141 # op {
8142 #   name: "MapClear"
8143 #   attr {
8144 #     name: "capacity"
8145 #     type: "int"
8146 #     default_value {
8147 #       i: 0
8148 #     }
8149 #     has_minimum: true
8150 #   }
8151 #   attr {
8152 #     name: "memory_limit"
8153 #     type: "int"
8154 #     default_value {
8155 #       i: 0
8156 #     }
8157 #     has_minimum: true
8158 #   }
8159 #   attr {
8160 #     name: "dtypes"
8161 #     type: "list(type)"
8162 #   }
8163 #   attr {
8164 #     name: "container"
8165 #     type: "string"
8166 #     default_value {
8167 #       s: ""
8168 #     }
8169 #   }
8170 #   attr {
8171 #     name: "shared_name"
8172 #     type: "string"
8173 #     default_value {
8174 #       s: ""
8175 #     }
8176 #   }
8177 #   is_stateful: true
8178 # }
8179 # op {
8180 #   name: "MapIncompleteSize"
8181 #   output_arg {
8182 #     name: "size"
8183 #     type: DT_INT32
8184 #   }
8185 #   attr {
8186 #     name: "capacity"
8187 #     type: "int"
8188 #     default_value {
8189 #       i: 0
8190 #     }
8191 #     has_minimum: true
8192 #   }
8193 #   attr {
8194 #     name: "memory_limit"
8195 #     type: "int"
8196 #     default_value {
8197 #       i: 0
8198 #     }
8199 #     has_minimum: true
8200 #   }
8201 #   attr {
8202 #     name: "dtypes"
8203 #     type: "list(type)"
8204 #   }
8205 #   attr {
8206 #     name: "container"
8207 #     type: "string"
8208 #     default_value {
8209 #       s: ""
8210 #     }
8211 #   }
8212 #   attr {
8213 #     name: "shared_name"
8214 #     type: "string"
8215 #     default_value {
8216 #       s: ""
8217 #     }
8218 #   }
8219 #   is_stateful: true
8220 # }
8221 # op {
8222 #   name: "MapPeek"
8223 #   input_arg {
8224 #     name: "key"
8225 #     type: DT_INT64
8226 #   }
8227 #   input_arg {
8228 #     name: "indices"
8229 #     type: DT_INT32
8230 #   }
8231 #   output_arg {
8232 #     name: "values"
8233 #     type_list_attr: "dtypes"
8234 #   }
8235 #   attr {
8236 #     name: "capacity"
8237 #     type: "int"
8238 #     default_value {
8239 #       i: 0
8240 #     }
8241 #     has_minimum: true
8242 #   }
8243 #   attr {
8244 #     name: "memory_limit"
8245 #     type: "int"
8246 #     default_value {
8247 #       i: 0
8248 #     }
8249 #     has_minimum: true
8250 #   }
8251 #   attr {
8252 #     name: "dtypes"
8253 #     type: "list(type)"
8254 #     has_minimum: true
8255 #     minimum: 1
8256 #   }
8257 #   attr {
8258 #     name: "container"
8259 #     type: "string"
8260 #     default_value {
8261 #       s: ""
8262 #     }
8263 #   }
8264 #   attr {
8265 #     name: "shared_name"
8266 #     type: "string"
8267 #     default_value {
8268 #       s: ""
8269 #     }
8270 #   }
8271 #   is_stateful: true
8272 # }
8273 # op {
8274 #   name: "MapSize"
8275 #   output_arg {
8276 #     name: "size"
8277 #     type: DT_INT32
8278 #   }
8279 #   attr {
8280 #     name: "capacity"
8281 #     type: "int"
8282 #     default_value {
8283 #       i: 0
8284 #     }
8285 #     has_minimum: true
8286 #   }
8287 #   attr {
8288 #     name: "memory_limit"
8289 #     type: "int"
8290 #     default_value {
8291 #       i: 0
8292 #     }
8293 #     has_minimum: true
8294 #   }
8295 #   attr {
8296 #     name: "dtypes"
8297 #     type: "list(type)"
8298 #   }
8299 #   attr {
8300 #     name: "container"
8301 #     type: "string"
8302 #     default_value {
8303 #       s: ""
8304 #     }
8305 #   }
8306 #   attr {
8307 #     name: "shared_name"
8308 #     type: "string"
8309 #     default_value {
8310 #       s: ""
8311 #     }
8312 #   }
8313 #   is_stateful: true
8314 # }
8315 # op {
8316 #   name: "MapStage"
8317 #   input_arg {
8318 #     name: "key"
8319 #     type: DT_INT64
8320 #   }
8321 #   input_arg {
8322 #     name: "indices"
8323 #     type: DT_INT32
8324 #   }
8325 #   input_arg {
8326 #     name: "values"
8327 #     type_list_attr: "fake_dtypes"
8328 #   }
8329 #   attr {
8330 #     name: "capacity"
8331 #     type: "int"
8332 #     default_value {
8333 #       i: 0
8334 #     }
8335 #     has_minimum: true
8336 #   }
8337 #   attr {
8338 #     name: "memory_limit"
8339 #     type: "int"
8340 #     default_value {
8341 #       i: 0
8342 #     }
8343 #     has_minimum: true
8344 #   }
8345 #   attr {
8346 #     name: "dtypes"
8347 #     type: "list(type)"
8348 #   }
8349 #   attr {
8350 #     name: "fake_dtypes"
8351 #     type: "list(type)"
8352 #     has_minimum: true
8353 #     minimum: 1
8354 #   }
8355 #   attr {
8356 #     name: "container"
8357 #     type: "string"
8358 #     default_value {
8359 #       s: ""
8360 #     }
8361 #   }
8362 #   attr {
8363 #     name: "shared_name"
8364 #     type: "string"
8365 #     default_value {
8366 #       s: ""
8367 #     }
8368 #   }
8369 #   is_stateful: true
8370 # }
8371 # op {
8372 #   name: "MapUnstage"
8373 #   input_arg {
8374 #     name: "key"
8375 #     type: DT_INT64
8376 #   }
8377 #   input_arg {
8378 #     name: "indices"
8379 #     type: DT_INT32
8380 #   }
8381 #   output_arg {
8382 #     name: "values"
8383 #     type_list_attr: "dtypes"
8384 #   }
8385 #   attr {
8386 #     name: "capacity"
8387 #     type: "int"
8388 #     default_value {
8389 #       i: 0
8390 #     }
8391 #     has_minimum: true
8392 #   }
8393 #   attr {
8394 #     name: "memory_limit"
8395 #     type: "int"
8396 #     default_value {
8397 #       i: 0
8398 #     }
8399 #     has_minimum: true
8400 #   }
8401 #   attr {
8402 #     name: "dtypes"
8403 #     type: "list(type)"
8404 #     has_minimum: true
8405 #     minimum: 1
8406 #   }
8407 #   attr {
8408 #     name: "container"
8409 #     type: "string"
8410 #     default_value {
8411 #       s: ""
8412 #     }
8413 #   }
8414 #   attr {
8415 #     name: "shared_name"
8416 #     type: "string"
8417 #     default_value {
8418 #       s: ""
8419 #     }
8420 #   }
8421 #   is_stateful: true
8422 # }
8423 # op {
8424 #   name: "MapUnstageNoKey"
8425 #   input_arg {
8426 #     name: "indices"
8427 #     type: DT_INT32
8428 #   }
8429 #   output_arg {
8430 #     name: "key"
8431 #     type: DT_INT64
8432 #   }
8433 #   output_arg {
8434 #     name: "values"
8435 #     type_list_attr: "dtypes"
8436 #   }
8437 #   attr {
8438 #     name: "capacity"
8439 #     type: "int"
8440 #     default_value {
8441 #       i: 0
8442 #     }
8443 #     has_minimum: true
8444 #   }
8445 #   attr {
8446 #     name: "memory_limit"
8447 #     type: "int"
8448 #     default_value {
8449 #       i: 0
8450 #     }
8451 #     has_minimum: true
8452 #   }
8453 #   attr {
8454 #     name: "dtypes"
8455 #     type: "list(type)"
8456 #     has_minimum: true
8457 #     minimum: 1
8458 #   }
8459 #   attr {
8460 #     name: "container"
8461 #     type: "string"
8462 #     default_value {
8463 #       s: ""
8464 #     }
8465 #   }
8466 #   attr {
8467 #     name: "shared_name"
8468 #     type: "string"
8469 #     default_value {
8470 #       s: ""
8471 #     }
8472 #   }
8473 #   is_stateful: true
8474 # }
8475 # op {
8476 #   name: "OrderedMapClear"
8477 #   attr {
8478 #     name: "capacity"
8479 #     type: "int"
8480 #     default_value {
8481 #       i: 0
8482 #     }
8483 #     has_minimum: true
8484 #   }
8485 #   attr {
8486 #     name: "memory_limit"
8487 #     type: "int"
8488 #     default_value {
8489 #       i: 0
8490 #     }
8491 #     has_minimum: true
8492 #   }
8493 #   attr {
8494 #     name: "dtypes"
8495 #     type: "list(type)"
8496 #   }
8497 #   attr {
8498 #     name: "container"
8499 #     type: "string"
8500 #     default_value {
8501 #       s: ""
8502 #     }
8503 #   }
8504 #   attr {
8505 #     name: "shared_name"
8506 #     type: "string"
8507 #     default_value {
8508 #       s: ""
8509 #     }
8510 #   }
8511 #   is_stateful: true
8512 # }
8513 # op {
8514 #   name: "OrderedMapIncompleteSize"
8515 #   output_arg {
8516 #     name: "size"
8517 #     type: DT_INT32
8518 #   }
8519 #   attr {
8520 #     name: "capacity"
8521 #     type: "int"
8522 #     default_value {
8523 #       i: 0
8524 #     }
8525 #     has_minimum: true
8526 #   }
8527 #   attr {
8528 #     name: "memory_limit"
8529 #     type: "int"
8530 #     default_value {
8531 #       i: 0
8532 #     }
8533 #     has_minimum: true
8534 #   }
8535 #   attr {
8536 #     name: "dtypes"
8537 #     type: "list(type)"
8538 #   }
8539 #   attr {
8540 #     name: "container"
8541 #     type: "string"
8542 #     default_value {
8543 #       s: ""
8544 #     }
8545 #   }
8546 #   attr {
8547 #     name: "shared_name"
8548 #     type: "string"
8549 #     default_value {
8550 #       s: ""
8551 #     }
8552 #   }
8553 #   is_stateful: true
8554 # }
8555 # op {
8556 #   name: "OrderedMapPeek"
8557 #   input_arg {
8558 #     name: "key"
8559 #     type: DT_INT64
8560 #   }
8561 #   input_arg {
8562 #     name: "indices"
8563 #     type: DT_INT32
8564 #   }
8565 #   output_arg {
8566 #     name: "values"
8567 #     type_list_attr: "dtypes"
8568 #   }
8569 #   attr {
8570 #     name: "capacity"
8571 #     type: "int"
8572 #     default_value {
8573 #       i: 0
8574 #     }
8575 #     has_minimum: true
8576 #   }
8577 #   attr {
8578 #     name: "memory_limit"
8579 #     type: "int"
8580 #     default_value {
8581 #       i: 0
8582 #     }
8583 #     has_minimum: true
8584 #   }
8585 #   attr {
8586 #     name: "dtypes"
8587 #     type: "list(type)"
8588 #     has_minimum: true
8589 #     minimum: 1
8590 #   }
8591 #   attr {
8592 #     name: "container"
8593 #     type: "string"
8594 #     default_value {
8595 #       s: ""
8596 #     }
8597 #   }
8598 #   attr {
8599 #     name: "shared_name"
8600 #     type: "string"
8601 #     default_value {
8602 #       s: ""
8603 #     }
8604 #   }
8605 #   is_stateful: true
8606 # }
8607 # op {
8608 #   name: "OrderedMapSize"
8609 #   output_arg {
8610 #     name: "size"
8611 #     type: DT_INT32
8612 #   }
8613 #   attr {
8614 #     name: "capacity"
8615 #     type: "int"
8616 #     default_value {
8617 #       i: 0
8618 #     }
8619 #     has_minimum: true
8620 #   }
8621 #   attr {
8622 #     name: "memory_limit"
8623 #     type: "int"
8624 #     default_value {
8625 #       i: 0
8626 #     }
8627 #     has_minimum: true
8628 #   }
8629 #   attr {
8630 #     name: "dtypes"
8631 #     type: "list(type)"
8632 #   }
8633 #   attr {
8634 #     name: "container"
8635 #     type: "string"
8636 #     default_value {
8637 #       s: ""
8638 #     }
8639 #   }
8640 #   attr {
8641 #     name: "shared_name"
8642 #     type: "string"
8643 #     default_value {
8644 #       s: ""
8645 #     }
8646 #   }
8647 #   is_stateful: true
8648 # }
8649 # op {
8650 #   name: "OrderedMapStage"
8651 #   input_arg {
8652 #     name: "key"
8653 #     type: DT_INT64
8654 #   }
8655 #   input_arg {
8656 #     name: "indices"
8657 #     type: DT_INT32
8658 #   }
8659 #   input_arg {
8660 #     name: "values"
8661 #     type_list_attr: "fake_dtypes"
8662 #   }
8663 #   attr {
8664 #     name: "capacity"
8665 #     type: "int"
8666 #     default_value {
8667 #       i: 0
8668 #     }
8669 #     has_minimum: true
8670 #   }
8671 #   attr {
8672 #     name: "memory_limit"
8673 #     type: "int"
8674 #     default_value {
8675 #       i: 0
8676 #     }
8677 #     has_minimum: true
8678 #   }
8679 #   attr {
8680 #     name: "dtypes"
8681 #     type: "list(type)"
8682 #   }
8683 #   attr {
8684 #     name: "fake_dtypes"
8685 #     type: "list(type)"
8686 #     has_minimum: true
8687 #     minimum: 1
8688 #   }
8689 #   attr {
8690 #     name: "container"
8691 #     type: "string"
8692 #     default_value {
8693 #       s: ""
8694 #     }
8695 #   }
8696 #   attr {
8697 #     name: "shared_name"
8698 #     type: "string"
8699 #     default_value {
8700 #       s: ""
8701 #     }
8702 #   }
8703 #   is_stateful: true
8704 # }
8705 # op {
8706 #   name: "OrderedMapUnstage"
8707 #   input_arg {
8708 #     name: "key"
8709 #     type: DT_INT64
8710 #   }
8711 #   input_arg {
8712 #     name: "indices"
8713 #     type: DT_INT32
8714 #   }
8715 #   output_arg {
8716 #     name: "values"
8717 #     type_list_attr: "dtypes"
8718 #   }
8719 #   attr {
8720 #     name: "capacity"
8721 #     type: "int"
8722 #     default_value {
8723 #       i: 0
8724 #     }
8725 #     has_minimum: true
8726 #   }
8727 #   attr {
8728 #     name: "memory_limit"
8729 #     type: "int"
8730 #     default_value {
8731 #       i: 0
8732 #     }
8733 #     has_minimum: true
8734 #   }
8735 #   attr {
8736 #     name: "dtypes"
8737 #     type: "list(type)"
8738 #     has_minimum: true
8739 #     minimum: 1
8740 #   }
8741 #   attr {
8742 #     name: "container"
8743 #     type: "string"
8744 #     default_value {
8745 #       s: ""
8746 #     }
8747 #   }
8748 #   attr {
8749 #     name: "shared_name"
8750 #     type: "string"
8751 #     default_value {
8752 #       s: ""
8753 #     }
8754 #   }
8755 #   is_stateful: true
8756 # }
8757 # op {
8758 #   name: "OrderedMapUnstageNoKey"
8759 #   input_arg {
8760 #     name: "indices"
8761 #     type: DT_INT32
8762 #   }
8763 #   output_arg {
8764 #     name: "key"
8765 #     type: DT_INT64
8766 #   }
8767 #   output_arg {
8768 #     name: "values"
8769 #     type_list_attr: "dtypes"
8770 #   }
8771 #   attr {
8772 #     name: "capacity"
8773 #     type: "int"
8774 #     default_value {
8775 #       i: 0
8776 #     }
8777 #     has_minimum: true
8778 #   }
8779 #   attr {
8780 #     name: "memory_limit"
8781 #     type: "int"
8782 #     default_value {
8783 #       i: 0
8784 #     }
8785 #     has_minimum: true
8786 #   }
8787 #   attr {
8788 #     name: "dtypes"
8789 #     type: "list(type)"
8790 #     has_minimum: true
8791 #     minimum: 1
8792 #   }
8793 #   attr {
8794 #     name: "container"
8795 #     type: "string"
8796 #     default_value {
8797 #       s: ""
8798 #     }
8799 #   }
8800 #   attr {
8801 #     name: "shared_name"
8802 #     type: "string"
8803 #     default_value {
8804 #       s: ""
8805 #     }
8806 #   }
8807 #   is_stateful: true
8808 # }
8809 # op {
8810 #   name: "PaddingFIFOQueue"
8811 #   output_arg {
8812 #     name: "handle"
8813 #     type: DT_STRING
8814 #     is_ref: true
8815 #   }
8816 #   attr {
8817 #     name: "component_types"
8818 #     type: "list(type)"
8819 #     has_minimum: true
8820 #     minimum: 1
8821 #   }
8822 #   attr {
8823 #     name: "shapes"
8824 #     type: "list(shape)"
8825 #     default_value {
8826 #       list {
8827 #       }
8828 #     }
8829 #     has_minimum: true
8830 #   }
8831 #   attr {
8832 #     name: "capacity"
8833 #     type: "int"
8834 #     default_value {
8835 #       i: -1
8836 #     }
8837 #   }
8838 #   attr {
8839 #     name: "container"
8840 #     type: "string"
8841 #     default_value {
8842 #       s: ""
8843 #     }
8844 #   }
8845 #   attr {
8846 #     name: "shared_name"
8847 #     type: "string"
8848 #     default_value {
8849 #       s: ""
8850 #     }
8851 #   }
8852 #   is_stateful: true
8853 # }
8854 # op {
8855 #   name: "PaddingFIFOQueueV2"
8856 #   output_arg {
8857 #     name: "handle"
8858 #     type: DT_RESOURCE
8859 #   }
8860 #   attr {
8861 #     name: "component_types"
8862 #     type: "list(type)"
8863 #     has_minimum: true
8864 #     minimum: 1
8865 #   }
8866 #   attr {
8867 #     name: "shapes"
8868 #     type: "list(shape)"
8869 #     default_value {
8870 #       list {
8871 #       }
8872 #     }
8873 #     has_minimum: true
8874 #   }
8875 #   attr {
8876 #     name: "capacity"
8877 #     type: "int"
8878 #     default_value {
8879 #       i: -1
8880 #     }
8881 #   }
8882 #   attr {
8883 #     name: "container"
8884 #     type: "string"
8885 #     default_value {
8886 #       s: ""
8887 #     }
8888 #   }
8889 #   attr {
8890 #     name: "shared_name"
8891 #     type: "string"
8892 #     default_value {
8893 #       s: ""
8894 #     }
8895 #   }
8896 #   is_stateful: true
8897 # }
8898 # op {
8899 #   name: "ParallelDynamicStitch"
8900 #   input_arg {
8901 #     name: "indices"
8902 #     type: DT_INT32
8903 #     number_attr: "N"
8904 #   }
8905 #   input_arg {
8906 #     name: "data"
8907 #     type_attr: "T"
8908 #     number_attr: "N"
8909 #   }
8910 #   output_arg {
8911 #     name: "merged"
8912 #     type_attr: "T"
8913 #   }
8914 #   attr {
8915 #     name: "N"
8916 #     type: "int"
8917 #     has_minimum: true
8918 #     minimum: 1
8919 #   }
8920 #   attr {
8921 #     name: "T"
8922 #     type: "type"
8923 #   }
8924 # }
8925 # op {
8926 #   name: "PriorityQueue"
8927 #   output_arg {
8928 #     name: "handle"
8929 #     type: DT_STRING
8930 #     is_ref: true
8931 #   }
8932 #   attr {
8933 #     name: "component_types"
8934 #     type: "list(type)"
8935 #     default_value {
8936 #       list {
8937 #       }
8938 #     }
8939 #     has_minimum: true
8940 #   }
8941 #   attr {
8942 #     name: "shapes"
8943 #     type: "list(shape)"
8944 #     has_minimum: true
8945 #   }
8946 #   attr {
8947 #     name: "capacity"
8948 #     type: "int"
8949 #     default_value {
8950 #       i: -1
8951 #     }
8952 #   }
8953 #   attr {
8954 #     name: "container"
8955 #     type: "string"
8956 #     default_value {
8957 #       s: ""
8958 #     }
8959 #   }
8960 #   attr {
8961 #     name: "shared_name"
8962 #     type: "string"
8963 #     default_value {
8964 #       s: ""
8965 #     }
8966 #   }
8967 #   is_stateful: true
8968 # }
8969 # op {
8970 #   name: "PriorityQueueV2"
8971 #   output_arg {
8972 #     name: "handle"
8973 #     type: DT_RESOURCE
8974 #   }
8975 #   attr {
8976 #     name: "component_types"
8977 #     type: "list(type)"
8978 #     default_value {
8979 #       list {
8980 #       }
8981 #     }
8982 #     has_minimum: true
8983 #   }
8984 #   attr {
8985 #     name: "shapes"
8986 #     type: "list(shape)"
8987 #     has_minimum: true
8988 #   }
8989 #   attr {
8990 #     name: "capacity"
8991 #     type: "int"
8992 #     default_value {
8993 #       i: -1
8994 #     }
8995 #   }
8996 #   attr {
8997 #     name: "container"
8998 #     type: "string"
8999 #     default_value {
9000 #       s: ""
9001 #     }
9002 #   }
9003 #   attr {
9004 #     name: "shared_name"
9005 #     type: "string"
9006 #     default_value {
9007 #       s: ""
9008 #     }
9009 #   }
9010 #   is_stateful: true
9011 # }
9012 # op {
9013 #   name: "QueueClose"
9014 #   input_arg {
9015 #     name: "handle"
9016 #     type: DT_STRING
9017 #     is_ref: true
9018 #   }
9019 #   attr {
9020 #     name: "cancel_pending_enqueues"
9021 #     type: "bool"
9022 #     default_value {
9023 #       b: false
9024 #     }
9025 #   }
9026 # }
9027 # op {
9028 #   name: "QueueCloseV2"
9029 #   input_arg {
9030 #     name: "handle"
9031 #     type: DT_RESOURCE
9032 #   }
9033 #   attr {
9034 #     name: "cancel_pending_enqueues"
9035 #     type: "bool"
9036 #     default_value {
9037 #       b: false
9038 #     }
9039 #   }
9040 #   is_stateful: true
9041 # }
9042 # op {
9043 #   name: "QueueDequeue"
9044 #   input_arg {
9045 #     name: "handle"
9046 #     type: DT_STRING
9047 #     is_ref: true
9048 #   }
9049 #   output_arg {
9050 #     name: "components"
9051 #     type_list_attr: "component_types"
9052 #   }
9053 #   attr {
9054 #     name: "component_types"
9055 #     type: "list(type)"
9056 #     has_minimum: true
9057 #     minimum: 1
9058 #   }
9059 #   attr {
9060 #     name: "timeout_ms"
9061 #     type: "int"
9062 #     default_value {
9063 #       i: -1
9064 #     }
9065 #   }
9066 # }
9067 # op {
9068 #   name: "QueueDequeueMany"
9069 #   input_arg {
9070 #     name: "handle"
9071 #     type: DT_STRING
9072 #     is_ref: true
9073 #   }
9074 #   input_arg {
9075 #     name: "n"
9076 #     type: DT_INT32
9077 #   }
9078 #   output_arg {
9079 #     name: "components"
9080 #     type_list_attr: "component_types"
9081 #   }
9082 #   attr {
9083 #     name: "component_types"
9084 #     type: "list(type)"
9085 #     has_minimum: true
9086 #     minimum: 1
9087 #   }
9088 #   attr {
9089 #     name: "timeout_ms"
9090 #     type: "int"
9091 #     default_value {
9092 #       i: -1
9093 #     }
9094 #   }
9095 # }
9096 # op {
9097 #   name: "QueueDequeueManyV2"
9098 #   input_arg {
9099 #     name: "handle"
9100 #     type: DT_RESOURCE
9101 #   }
9102 #   input_arg {
9103 #     name: "n"
9104 #     type: DT_INT32
9105 #   }
9106 #   output_arg {
9107 #     name: "components"
9108 #     type_list_attr: "component_types"
9109 #   }
9110 #   attr {
9111 #     name: "component_types"
9112 #     type: "list(type)"
9113 #     has_minimum: true
9114 #     minimum: 1
9115 #   }
9116 #   attr {
9117 #     name: "timeout_ms"
9118 #     type: "int"
9119 #     default_value {
9120 #       i: -1
9121 #     }
9122 #   }
9123 #   is_stateful: true
9124 # }
9125 # op {
9126 #   name: "QueueDequeueUpTo"
9127 #   input_arg {
9128 #     name: "handle"
9129 #     type: DT_STRING
9130 #     is_ref: true
9131 #   }
9132 #   input_arg {
9133 #     name: "n"
9134 #     type: DT_INT32
9135 #   }
9136 #   output_arg {
9137 #     name: "components"
9138 #     type_list_attr: "component_types"
9139 #   }
9140 #   attr {
9141 #     name: "component_types"
9142 #     type: "list(type)"
9143 #     has_minimum: true
9144 #     minimum: 1
9145 #   }
9146 #   attr {
9147 #     name: "timeout_ms"
9148 #     type: "int"
9149 #     default_value {
9150 #       i: -1
9151 #     }
9152 #   }
9153 # }
9154 # op {
9155 #   name: "QueueDequeueUpToV2"
9156 #   input_arg {
9157 #     name: "handle"
9158 #     type: DT_RESOURCE
9159 #   }
9160 #   input_arg {
9161 #     name: "n"
9162 #     type: DT_INT32
9163 #   }
9164 #   output_arg {
9165 #     name: "components"
9166 #     type_list_attr: "component_types"
9167 #   }
9168 #   attr {
9169 #     name: "component_types"
9170 #     type: "list(type)"
9171 #     has_minimum: true
9172 #     minimum: 1
9173 #   }
9174 #   attr {
9175 #     name: "timeout_ms"
9176 #     type: "int"
9177 #     default_value {
9178 #       i: -1
9179 #     }
9180 #   }
9181 #   is_stateful: true
9182 # }
9183 # op {
9184 #   name: "QueueDequeueV2"
9185 #   input_arg {
9186 #     name: "handle"
9187 #     type: DT_RESOURCE
9188 #   }
9189 #   output_arg {
9190 #     name: "components"
9191 #     type_list_attr: "component_types"
9192 #   }
9193 #   attr {
9194 #     name: "component_types"
9195 #     type: "list(type)"
9196 #     has_minimum: true
9197 #     minimum: 1
9198 #   }
9199 #   attr {
9200 #     name: "timeout_ms"
9201 #     type: "int"
9202 #     default_value {
9203 #       i: -1
9204 #     }
9205 #   }
9206 #   is_stateful: true
9207 # }
9208 # op {
9209 #   name: "QueueEnqueue"
9210 #   input_arg {
9211 #     name: "handle"
9212 #     type: DT_STRING
9213 #     is_ref: true
9214 #   }
9215 #   input_arg {
9216 #     name: "components"
9217 #     type_list_attr: "Tcomponents"
9218 #   }
9219 #   attr {
9220 #     name: "Tcomponents"
9221 #     type: "list(type)"
9222 #     has_minimum: true
9223 #     minimum: 1
9224 #   }
9225 #   attr {
9226 #     name: "timeout_ms"
9227 #     type: "int"
9228 #     default_value {
9229 #       i: -1
9230 #     }
9231 #   }
9232 # }
9233 # op {
9234 #   name: "QueueEnqueueMany"
9235 #   input_arg {
9236 #     name: "handle"
9237 #     type: DT_STRING
9238 #     is_ref: true
9239 #   }
9240 #   input_arg {
9241 #     name: "components"
9242 #     type_list_attr: "Tcomponents"
9243 #   }
9244 #   attr {
9245 #     name: "Tcomponents"
9246 #     type: "list(type)"
9247 #     has_minimum: true
9248 #     minimum: 1
9249 #   }
9250 #   attr {
9251 #     name: "timeout_ms"
9252 #     type: "int"
9253 #     default_value {
9254 #       i: -1
9255 #     }
9256 #   }
9257 # }
9258 # op {
9259 #   name: "QueueEnqueueManyV2"
9260 #   input_arg {
9261 #     name: "handle"
9262 #     type: DT_RESOURCE
9263 #   }
9264 #   input_arg {
9265 #     name: "components"
9266 #     type_list_attr: "Tcomponents"
9267 #   }
9268 #   attr {
9269 #     name: "Tcomponents"
9270 #     type: "list(type)"
9271 #     has_minimum: true
9272 #     minimum: 1
9273 #   }
9274 #   attr {
9275 #     name: "timeout_ms"
9276 #     type: "int"
9277 #     default_value {
9278 #       i: -1
9279 #     }
9280 #   }
9281 #   is_stateful: true
9282 # }
9283 # op {
9284 #   name: "QueueEnqueueV2"
9285 #   input_arg {
9286 #     name: "handle"
9287 #     type: DT_RESOURCE
9288 #   }
9289 #   input_arg {
9290 #     name: "components"
9291 #     type_list_attr: "Tcomponents"
9292 #   }
9293 #   attr {
9294 #     name: "Tcomponents"
9295 #     type: "list(type)"
9296 #     has_minimum: true
9297 #     minimum: 1
9298 #   }
9299 #   attr {
9300 #     name: "timeout_ms"
9301 #     type: "int"
9302 #     default_value {
9303 #       i: -1
9304 #     }
9305 #   }
9306 #   is_stateful: true
9307 # }
9308 # op {
9309 #   name: "QueueIsClosed"
9310 #   input_arg {
9311 #     name: "handle"
9312 #     type: DT_STRING
9313 #     is_ref: true
9314 #   }
9315 #   output_arg {
9316 #     name: "is_closed"
9317 #     type: DT_BOOL
9318 #   }
9319 # }
9320 # op {
9321 #   name: "QueueIsClosedV2"
9322 #   input_arg {
9323 #     name: "handle"
9324 #     type: DT_RESOURCE
9325 #   }
9326 #   output_arg {
9327 #     name: "is_closed"
9328 #     type: DT_BOOL
9329 #   }
9330 #   is_stateful: true
9331 # }
9332 # op {
9333 #   name: "QueueSize"
9334 #   input_arg {
9335 #     name: "handle"
9336 #     type: DT_STRING
9337 #     is_ref: true
9338 #   }
9339 #   output_arg {
9340 #     name: "size"
9341 #     type: DT_INT32
9342 #   }
9343 # }
9344 # op {
9345 #   name: "QueueSizeV2"
9346 #   input_arg {
9347 #     name: "handle"
9348 #     type: DT_RESOURCE
9349 #   }
9350 #   output_arg {
9351 #     name: "size"
9352 #     type: DT_INT32
9353 #   }
9354 #   is_stateful: true
9355 # }
9356 # op {
9357 #   name: "RandomShuffleQueue"
9358 #   output_arg {
9359 #     name: "handle"
9360 #     type: DT_STRING
9361 #     is_ref: true
9362 #   }
9363 #   attr {
9364 #     name: "component_types"
9365 #     type: "list(type)"
9366 #     has_minimum: true
9367 #     minimum: 1
9368 #   }
9369 #   attr {
9370 #     name: "shapes"
9371 #     type: "list(shape)"
9372 #     default_value {
9373 #       list {
9374 #       }
9375 #     }
9376 #     has_minimum: true
9377 #   }
9378 #   attr {
9379 #     name: "capacity"
9380 #     type: "int"
9381 #     default_value {
9382 #       i: -1
9383 #     }
9384 #   }
9385 #   attr {
9386 #     name: "min_after_dequeue"
9387 #     type: "int"
9388 #     default_value {
9389 #       i: 0
9390 #     }
9391 #   }
9392 #   attr {
9393 #     name: "seed"
9394 #     type: "int"
9395 #     default_value {
9396 #       i: 0
9397 #     }
9398 #   }
9399 #   attr {
9400 #     name: "seed2"
9401 #     type: "int"
9402 #     default_value {
9403 #       i: 0
9404 #     }
9405 #   }
9406 #   attr {
9407 #     name: "container"
9408 #     type: "string"
9409 #     default_value {
9410 #       s: ""
9411 #     }
9412 #   }
9413 #   attr {
9414 #     name: "shared_name"
9415 #     type: "string"
9416 #     default_value {
9417 #       s: ""
9418 #     }
9419 #   }
9420 #   is_stateful: true
9421 # }
9422 # op {
9423 #   name: "RandomShuffleQueueV2"
9424 #   output_arg {
9425 #     name: "handle"
9426 #     type: DT_RESOURCE
9427 #   }
9428 #   attr {
9429 #     name: "component_types"
9430 #     type: "list(type)"
9431 #     has_minimum: true
9432 #     minimum: 1
9433 #   }
9434 #   attr {
9435 #     name: "shapes"
9436 #     type: "list(shape)"
9437 #     default_value {
9438 #       list {
9439 #       }
9440 #     }
9441 #     has_minimum: true
9442 #   }
9443 #   attr {
9444 #     name: "capacity"
9445 #     type: "int"
9446 #     default_value {
9447 #       i: -1
9448 #     }
9449 #   }
9450 #   attr {
9451 #     name: "min_after_dequeue"
9452 #     type: "int"
9453 #     default_value {
9454 #       i: 0
9455 #     }
9456 #   }
9457 #   attr {
9458 #     name: "seed"
9459 #     type: "int"
9460 #     default_value {
9461 #       i: 0
9462 #     }
9463 #   }
9464 #   attr {
9465 #     name: "seed2"
9466 #     type: "int"
9467 #     default_value {
9468 #       i: 0
9469 #     }
9470 #   }
9471 #   attr {
9472 #     name: "container"
9473 #     type: "string"
9474 #     default_value {
9475 #       s: ""
9476 #     }
9477 #   }
9478 #   attr {
9479 #     name: "shared_name"
9480 #     type: "string"
9481 #     default_value {
9482 #       s: ""
9483 #     }
9484 #   }
9485 #   is_stateful: true
9486 # }
9487 # op {
9488 #   name: "RecordInput"
9489 #   output_arg {
9490 #     name: "records"
9491 #     type: DT_STRING
9492 #   }
9493 #   attr {
9494 #     name: "file_pattern"
9495 #     type: "string"
9496 #   }
9497 #   attr {
9498 #     name: "file_random_seed"
9499 #     type: "int"
9500 #     default_value {
9501 #       i: 301
9502 #     }
9503 #   }
9504 #   attr {
9505 #     name: "file_shuffle_shift_ratio"
9506 #     type: "float"
9507 #     default_value {
9508 #       f: 0
9509 #     }
9510 #   }
9511 #   attr {
9512 #     name: "file_buffer_size"
9513 #     type: "int"
9514 #     default_value {
9515 #       i: 10000
9516 #     }
9517 #   }
9518 #   attr {
9519 #     name: "file_parallelism"
9520 #     type: "int"
9521 #     default_value {
9522 #       i: 16
9523 #     }
9524 #   }
9525 #   attr {
9526 #     name: "batch_size"
9527 #     type: "int"
9528 #     default_value {
9529 #       i: 32
9530 #     }
9531 #   }
9532 #   attr {
9533 #     name: "compression_type"
9534 #     type: "string"
9535 #     default_value {
9536 #       s: ""
9537 #     }
9538 #   }
9539 #   is_stateful: true
9540 # }
9541 # op {
9542 #   name: "SparseAccumulatorApplyGradient"
9543 #   input_arg {
9544 #     name: "handle"
9545 #     type: DT_STRING
9546 #     is_ref: true
9547 #   }
9548 #   input_arg {
9549 #     name: "local_step"
9550 #     type: DT_INT64
9551 #   }
9552 #   input_arg {
9553 #     name: "gradient_indices"
9554 #     type: DT_INT64
9555 #   }
9556 #   input_arg {
9557 #     name: "gradient_values"
9558 #     type_attr: "dtype"
9559 #   }
9560 #   input_arg {
9561 #     name: "gradient_shape"
9562 #     type: DT_INT64
9563 #   }
9564 #   attr {
9565 #     name: "dtype"
9566 #     type: "type"
9567 #     allowed_values {
9568 #       list {
9569 #         type: DT_FLOAT
9570 #         type: DT_DOUBLE
9571 #         type: DT_INT32
9572 #         type: DT_UINT8
9573 #         type: DT_INT16
9574 #         type: DT_INT8
9575 #         type: DT_COMPLEX64
9576 #         type: DT_INT64
9577 #         type: DT_QINT8
9578 #         type: DT_QUINT8
9579 #         type: DT_QINT32
9580 #         type: DT_BFLOAT16
9581 #         type: DT_UINT16
9582 #         type: DT_COMPLEX128
9583 #         type: DT_HALF
9584 #         type: DT_UINT32
9585 #         type: DT_UINT64
9586 #       }
9587 #     }
9588 #   }
9589 #   attr {
9590 #     name: "has_known_shape"
9591 #     type: "bool"
9592 #   }
9593 # }
9594 # op {
9595 #   name: "SparseAccumulatorTakeGradient"
9596 #   input_arg {
9597 #     name: "handle"
9598 #     type: DT_STRING
9599 #     is_ref: true
9600 #   }
9601 #   input_arg {
9602 #     name: "num_required"
9603 #     type: DT_INT32
9604 #   }
9605 #   output_arg {
9606 #     name: "indices"
9607 #     type: DT_INT64
9608 #   }
9609 #   output_arg {
9610 #     name: "values"
9611 #     type_attr: "dtype"
9612 #   }
9613 #   output_arg {
9614 #     name: "shape"
9615 #     type: DT_INT64
9616 #   }
9617 #   attr {
9618 #     name: "dtype"
9619 #     type: "type"
9620 #     allowed_values {
9621 #       list {
9622 #         type: DT_FLOAT
9623 #         type: DT_DOUBLE
9624 #         type: DT_INT32
9625 #         type: DT_UINT8
9626 #         type: DT_INT16
9627 #         type: DT_INT8
9628 #         type: DT_COMPLEX64
9629 #         type: DT_INT64
9630 #         type: DT_QINT8
9631 #         type: DT_QUINT8
9632 #         type: DT_QINT32
9633 #         type: DT_BFLOAT16
9634 #         type: DT_UINT16
9635 #         type: DT_COMPLEX128
9636 #         type: DT_HALF
9637 #         type: DT_UINT32
9638 #         type: DT_UINT64
9639 #       }
9640 #     }
9641 #   }
9642 # }
9643 # op {
9644 #   name: "SparseConditionalAccumulator"
9645 #   output_arg {
9646 #     name: "handle"
9647 #     type: DT_STRING
9648 #     is_ref: true
9649 #   }
9650 #   attr {
9651 #     name: "dtype"
9652 #     type: "type"
9653 #     allowed_values {
9654 #       list {
9655 #         type: DT_FLOAT
9656 #         type: DT_DOUBLE
9657 #         type: DT_INT32
9658 #         type: DT_UINT8
9659 #         type: DT_INT16
9660 #         type: DT_INT8
9661 #         type: DT_COMPLEX64
9662 #         type: DT_INT64
9663 #         type: DT_QINT8
9664 #         type: DT_QUINT8
9665 #         type: DT_QINT32
9666 #         type: DT_BFLOAT16
9667 #         type: DT_UINT16
9668 #         type: DT_COMPLEX128
9669 #         type: DT_HALF
9670 #         type: DT_UINT32
9671 #         type: DT_UINT64
9672 #       }
9673 #     }
9674 #   }
9675 #   attr {
9676 #     name: "shape"
9677 #     type: "shape"
9678 #   }
9679 #   attr {
9680 #     name: "container"
9681 #     type: "string"
9682 #     default_value {
9683 #       s: ""
9684 #     }
9685 #   }
9686 #   attr {
9687 #     name: "shared_name"
9688 #     type: "string"
9689 #     default_value {
9690 #       s: ""
9691 #     }
9692 #   }
9693 #   attr {
9694 #     name: "reduction_type"
9695 #     type: "string"
9696 #     default_value {
9697 #       s: "MEAN"
9698 #     }
9699 #     allowed_values {
9700 #       list {
9701 #         s: "MEAN"
9702 #         s: "SUM"
9703 #       }
9704 #     }
9705 #   }
9706 #   is_stateful: true
9707 # }
9708 # op {
9709 #   name: "Stack"
9710 #   output_arg {
9711 #     name: "handle"
9712 #     type: DT_STRING
9713 #     is_ref: true
9714 #   }
9715 #   attr {
9716 #     name: "elem_type"
9717 #     type: "type"
9718 #   }
9719 #   attr {
9720 #     name: "stack_name"
9721 #     type: "string"
9722 #     default_value {
9723 #       s: ""
9724 #     }
9725 #   }
9726 #   is_stateful: true
9727 # }
9728 # op {
9729 #   name: "StackClose"
9730 #   input_arg {
9731 #     name: "handle"
9732 #     type: DT_STRING
9733 #     is_ref: true
9734 #   }
9735 # }
9736 # op {
9737 #   name: "StackCloseV2"
9738 #   input_arg {
9739 #     name: "handle"
9740 #     type: DT_RESOURCE
9741 #   }
9742 #   is_stateful: true
9743 # }
9744 # op {
9745 #   name: "StackPop"
9746 #   input_arg {
9747 #     name: "handle"
9748 #     type: DT_STRING
9749 #     is_ref: true
9750 #   }
9751 #   output_arg {
9752 #     name: "elem"
9753 #     type_attr: "elem_type"
9754 #   }
9755 #   attr {
9756 #     name: "elem_type"
9757 #     type: "type"
9758 #   }
9759 # }
9760 # op {
9761 #   name: "StackPopV2"
9762 #   input_arg {
9763 #     name: "handle"
9764 #     type: DT_RESOURCE
9765 #   }
9766 #   output_arg {
9767 #     name: "elem"
9768 #     type_attr: "elem_type"
9769 #   }
9770 #   attr {
9771 #     name: "elem_type"
9772 #     type: "type"
9773 #   }
9774 #   is_stateful: true
9775 # }
9776 # op {
9777 #   name: "StackPush"
9778 #   input_arg {
9779 #     name: "handle"
9780 #     type: DT_STRING
9781 #     is_ref: true
9782 #   }
9783 #   input_arg {
9784 #     name: "elem"
9785 #     type_attr: "T"
9786 #   }
9787 #   output_arg {
9788 #     name: "output"
9789 #     type_attr: "T"
9790 #   }
9791 #   attr {
9792 #     name: "T"
9793 #     type: "type"
9794 #   }
9795 #   attr {
9796 #     name: "swap_memory"
9797 #     type: "bool"
9798 #     default_value {
9799 #       b: false
9800 #     }
9801 #   }
9802 # }
9803 # op {
9804 #   name: "StackPushV2"
9805 #   input_arg {
9806 #     name: "handle"
9807 #     type: DT_RESOURCE
9808 #   }
9809 #   input_arg {
9810 #     name: "elem"
9811 #     type_attr: "T"
9812 #   }
9813 #   output_arg {
9814 #     name: "output"
9815 #     type_attr: "T"
9816 #   }
9817 #   attr {
9818 #     name: "T"
9819 #     type: "type"
9820 #   }
9821 #   attr {
9822 #     name: "swap_memory"
9823 #     type: "bool"
9824 #     default_value {
9825 #       b: false
9826 #     }
9827 #   }
9828 #   is_stateful: true
9829 # }
9830 # op {
9831 #   name: "StackV2"
9832 #   input_arg {
9833 #     name: "max_size"
9834 #     type: DT_INT32
9835 #   }
9836 #   output_arg {
9837 #     name: "handle"
9838 #     type: DT_RESOURCE
9839 #   }
9840 #   attr {
9841 #     name: "elem_type"
9842 #     type: "type"
9843 #   }
9844 #   attr {
9845 #     name: "stack_name"
9846 #     type: "string"
9847 #     default_value {
9848 #       s: ""
9849 #     }
9850 #   }
9851 #   is_stateful: true
9852 # }
9853 # op {
9854 #   name: "Stage"
9855 #   input_arg {
9856 #     name: "values"
9857 #     type_list_attr: "dtypes"
9858 #   }
9859 #   attr {
9860 #     name: "capacity"
9861 #     type: "int"
9862 #     default_value {
9863 #       i: 0
9864 #     }
9865 #     has_minimum: true
9866 #   }
9867 #   attr {
9868 #     name: "memory_limit"
9869 #     type: "int"
9870 #     default_value {
9871 #       i: 0
9872 #     }
9873 #     has_minimum: true
9874 #   }
9875 #   attr {
9876 #     name: "dtypes"
9877 #     type: "list(type)"
9878 #     has_minimum: true
9879 #     minimum: 1
9880 #   }
9881 #   attr {
9882 #     name: "container"
9883 #     type: "string"
9884 #     default_value {
9885 #       s: ""
9886 #     }
9887 #   }
9888 #   attr {
9889 #     name: "shared_name"
9890 #     type: "string"
9891 #     default_value {
9892 #       s: ""
9893 #     }
9894 #   }
9895 #   is_stateful: true
9896 # }
9897 # op {
9898 #   name: "StageClear"
9899 #   attr {
9900 #     name: "capacity"
9901 #     type: "int"
9902 #     default_value {
9903 #       i: 0
9904 #     }
9905 #     has_minimum: true
9906 #   }
9907 #   attr {
9908 #     name: "memory_limit"
9909 #     type: "int"
9910 #     default_value {
9911 #       i: 0
9912 #     }
9913 #     has_minimum: true
9914 #   }
9915 #   attr {
9916 #     name: "dtypes"
9917 #     type: "list(type)"
9918 #   }
9919 #   attr {
9920 #     name: "container"
9921 #     type: "string"
9922 #     default_value {
9923 #       s: ""
9924 #     }
9925 #   }
9926 #   attr {
9927 #     name: "shared_name"
9928 #     type: "string"
9929 #     default_value {
9930 #       s: ""
9931 #     }
9932 #   }
9933 #   is_stateful: true
9934 # }
9935 # op {
9936 #   name: "StagePeek"
9937 #   input_arg {
9938 #     name: "index"
9939 #     type: DT_INT32
9940 #   }
9941 #   output_arg {
9942 #     name: "values"
9943 #     type_list_attr: "dtypes"
9944 #   }
9945 #   attr {
9946 #     name: "capacity"
9947 #     type: "int"
9948 #     default_value {
9949 #       i: 0
9950 #     }
9951 #     has_minimum: true
9952 #   }
9953 #   attr {
9954 #     name: "memory_limit"
9955 #     type: "int"
9956 #     default_value {
9957 #       i: 0
9958 #     }
9959 #     has_minimum: true
9960 #   }
9961 #   attr {
9962 #     name: "dtypes"
9963 #     type: "list(type)"
9964 #     has_minimum: true
9965 #     minimum: 1
9966 #   }
9967 #   attr {
9968 #     name: "container"
9969 #     type: "string"
9970 #     default_value {
9971 #       s: ""
9972 #     }
9973 #   }
9974 #   attr {
9975 #     name: "shared_name"
9976 #     type: "string"
9977 #     default_value {
9978 #       s: ""
9979 #     }
9980 #   }
9981 #   is_stateful: true
9982 # }
9983 # op {
9984 #   name: "StageSize"
9985 #   output_arg {
9986 #     name: "size"
9987 #     type: DT_INT32
9988 #   }
9989 #   attr {
9990 #     name: "capacity"
9991 #     type: "int"
9992 #     default_value {
9993 #       i: 0
9994 #     }
9995 #     has_minimum: true
9996 #   }
9997 #   attr {
9998 #     name: "memory_limit"
9999 #     type: "int"
10000 #     default_value {
10001 #       i: 0
10002 #     }
10003 #     has_minimum: true
10004 #   }
10005 #   attr {
10006 #     name: "dtypes"
10007 #     type: "list(type)"
10008 #   }
10009 #   attr {
10010 #     name: "container"
10011 #     type: "string"
10012 #     default_value {
10013 #       s: ""
10014 #     }
10015 #   }
10016 #   attr {
10017 #     name: "shared_name"
10018 #     type: "string"
10019 #     default_value {
10020 #       s: ""
10021 #     }
10022 #   }
10023 #   is_stateful: true
10024 # }
10025 # op {
10026 #   name: "TensorArray"
10027 #   input_arg {
10028 #     name: "size"
10029 #     type: DT_INT32
10030 #   }
10031 #   output_arg {
10032 #     name: "handle"
10033 #     type: DT_STRING
10034 #     is_ref: true
10035 #   }
10036 #   attr {
10037 #     name: "dtype"
10038 #     type: "type"
10039 #   }
10040 #   attr {
10041 #     name: "dynamic_size"
10042 #     type: "bool"
10043 #     default_value {
10044 #       b: false
10045 #     }
10046 #   }
10047 #   attr {
10048 #     name: "clear_after_read"
10049 #     type: "bool"
10050 #     default_value {
10051 #       b: true
10052 #     }
10053 #   }
10054 #   attr {
10055 #     name: "tensor_array_name"
10056 #     type: "string"
10057 #     default_value {
10058 #       s: ""
10059 #     }
10060 #   }
10061 #   attr {
10062 #     name: "element_shape"
10063 #     type: "shape"
10064 #     default_value {
10065 #       shape {
10066 #         unknown_rank: true
10067 #       }
10068 #     }
10069 #   }
10070 #   deprecation {
10071 #     version: 16
10072 #     explanation: "Use TensorArrayV3"
10073 #   }
10074 #   is_stateful: true
10075 # }
10076 # op {
10077 #   name: "TensorArrayClose"
10078 #   input_arg {
10079 #     name: "handle"
10080 #     type: DT_STRING
10081 #     is_ref: true
10082 #   }
10083 #   deprecation {
10084 #     version: 16
10085 #     explanation: "Use TensorArrayCloseV3"
10086 #   }
10087 # }
10088 # op {
10089 #   name: "TensorArrayCloseV2"
10090 #   input_arg {
10091 #     name: "handle"
10092 #     type: DT_STRING
10093 #   }
10094 #   deprecation {
10095 #     version: 26
10096 #     explanation: "Use TensorArrayCloseV3"
10097 #   }
10098 # }
10099 # op {
10100 #   name: "TensorArrayCloseV3"
10101 #   input_arg {
10102 #     name: "handle"
10103 #     type: DT_RESOURCE
10104 #   }
10105 #   is_stateful: true
10106 # }
10107 # op {
10108 #   name: "TensorArrayConcat"
10109 #   input_arg {
10110 #     name: "handle"
10111 #     type: DT_STRING
10112 #     is_ref: true
10113 #   }
10114 #   input_arg {
10115 #     name: "flow_in"
10116 #     type: DT_FLOAT
10117 #   }
10118 #   output_arg {
10119 #     name: "value"
10120 #     type_attr: "dtype"
10121 #   }
10122 #   output_arg {
10123 #     name: "lengths"
10124 #     type: DT_INT64
10125 #   }
10126 #   attr {
10127 #     name: "dtype"
10128 #     type: "type"
10129 #   }
10130 #   attr {
10131 #     name: "element_shape_except0"
10132 #     type: "shape"
10133 #     default_value {
10134 #       shape {
10135 #         unknown_rank: true
10136 #       }
10137 #     }
10138 #   }
10139 #   deprecation {
10140 #     version: 16
10141 #     explanation: "Use TensorArrayGradV3"
10142 #   }
10143 # }
10144 # op {
10145 #   name: "TensorArrayConcatV2"
10146 #   input_arg {
10147 #     name: "handle"
10148 #     type: DT_STRING
10149 #   }
10150 #   input_arg {
10151 #     name: "flow_in"
10152 #     type: DT_FLOAT
10153 #   }
10154 #   output_arg {
10155 #     name: "value"
10156 #     type_attr: "dtype"
10157 #   }
10158 #   output_arg {
10159 #     name: "lengths"
10160 #     type: DT_INT64
10161 #   }
10162 #   attr {
10163 #     name: "dtype"
10164 #     type: "type"
10165 #   }
10166 #   attr {
10167 #     name: "element_shape_except0"
10168 #     type: "shape"
10169 #     default_value {
10170 #       shape {
10171 #         unknown_rank: true
10172 #       }
10173 #     }
10174 #   }
10175 # }
10176 # op {
10177 #   name: "TensorArrayConcatV3"
10178 #   input_arg {
10179 #     name: "handle"
10180 #     type: DT_RESOURCE
10181 #   }
10182 #   input_arg {
10183 #     name: "flow_in"
10184 #     type: DT_FLOAT
10185 #   }
10186 #   output_arg {
10187 #     name: "value"
10188 #     type_attr: "dtype"
10189 #   }
10190 #   output_arg {
10191 #     name: "lengths"
10192 #     type: DT_INT64
10193 #   }
10194 #   attr {
10195 #     name: "dtype"
10196 #     type: "type"
10197 #   }
10198 #   attr {
10199 #     name: "element_shape_except0"
10200 #     type: "shape"
10201 #     default_value {
10202 #       shape {
10203 #         unknown_rank: true
10204 #       }
10205 #     }
10206 #   }
10207 #   is_stateful: true
10208 # }
10209 # op {
10210 #   name: "TensorArrayGather"
10211 #   input_arg {
10212 #     name: "handle"
10213 #     type: DT_STRING
10214 #     is_ref: true
10215 #   }
10216 #   input_arg {
10217 #     name: "indices"
10218 #     type: DT_INT32
10219 #   }
10220 #   input_arg {
10221 #     name: "flow_in"
10222 #     type: DT_FLOAT
10223 #   }
10224 #   output_arg {
10225 #     name: "value"
10226 #     type_attr: "dtype"
10227 #   }
10228 #   attr {
10229 #     name: "dtype"
10230 #     type: "type"
10231 #   }
10232 #   attr {
10233 #     name: "element_shape"
10234 #     type: "shape"
10235 #     default_value {
10236 #       shape {
10237 #         unknown_rank: true
10238 #       }
10239 #     }
10240 #   }
10241 #   deprecation {
10242 #     version: 16
10243 #     explanation: "Use TensorArrayGatherV3"
10244 #   }
10245 # }
10246 # op {
10247 #   name: "TensorArrayGatherV2"
10248 #   input_arg {
10249 #     name: "handle"
10250 #     type: DT_STRING
10251 #   }
10252 #   input_arg {
10253 #     name: "indices"
10254 #     type: DT_INT32
10255 #   }
10256 #   input_arg {
10257 #     name: "flow_in"
10258 #     type: DT_FLOAT
10259 #   }
10260 #   output_arg {
10261 #     name: "value"
10262 #     type_attr: "dtype"
10263 #   }
10264 #   attr {
10265 #     name: "dtype"
10266 #     type: "type"
10267 #   }
10268 #   attr {
10269 #     name: "element_shape"
10270 #     type: "shape"
10271 #     default_value {
10272 #       shape {
10273 #         unknown_rank: true
10274 #       }
10275 #     }
10276 #   }
10277 #   deprecation {
10278 #     version: 26
10279 #     explanation: "Use TensorArrayGatherV3"
10280 #   }
10281 # }
10282 # op {
10283 #   name: "TensorArrayGatherV3"
10284 #   input_arg {
10285 #     name: "handle"
10286 #     type: DT_RESOURCE
10287 #   }
10288 #   input_arg {
10289 #     name: "indices"
10290 #     type: DT_INT32
10291 #   }
10292 #   input_arg {
10293 #     name: "flow_in"
10294 #     type: DT_FLOAT
10295 #   }
10296 #   output_arg {
10297 #     name: "value"
10298 #     type_attr: "dtype"
10299 #   }
10300 #   attr {
10301 #     name: "dtype"
10302 #     type: "type"
10303 #   }
10304 #   attr {
10305 #     name: "element_shape"
10306 #     type: "shape"
10307 #     default_value {
10308 #       shape {
10309 #         unknown_rank: true
10310 #       }
10311 #     }
10312 #   }
10313 #   is_stateful: true
10314 # }
10315 # op {
10316 #   name: "TensorArrayGrad"
10317 #   input_arg {
10318 #     name: "handle"
10319 #     type: DT_STRING
10320 #   }
10321 #   input_arg {
10322 #     name: "flow_in"
10323 #     type: DT_FLOAT
10324 #   }
10325 #   output_arg {
10326 #     name: "grad_handle"
10327 #     type: DT_STRING
10328 #     is_ref: true
10329 #   }
10330 #   attr {
10331 #     name: "source"
10332 #     type: "string"
10333 #   }
10334 #   deprecation {
10335 #     version: 16
10336 #     explanation: "Use TensorArrayGradV3"
10337 #   }
10338 #   is_stateful: true
10339 # }
10340 # op {
10341 #   name: "TensorArrayGradV2"
10342 #   input_arg {
10343 #     name: "handle"
10344 #     type: DT_STRING
10345 #   }
10346 #   input_arg {
10347 #     name: "flow_in"
10348 #     type: DT_FLOAT
10349 #   }
10350 #   output_arg {
10351 #     name: "grad_handle"
10352 #     type: DT_STRING
10353 #   }
10354 #   attr {
10355 #     name: "source"
10356 #     type: "string"
10357 #   }
10358 #   deprecation {
10359 #     version: 26
10360 #     explanation: "Use TensorArrayGradV3"
10361 #   }
10362 #   is_stateful: true
10363 # }
10364 # op {
10365 #   name: "TensorArrayGradV3"
10366 #   input_arg {
10367 #     name: "handle"
10368 #     type: DT_RESOURCE
10369 #   }
10370 #   input_arg {
10371 #     name: "flow_in"
10372 #     type: DT_FLOAT
10373 #   }
10374 #   output_arg {
10375 #     name: "grad_handle"
10376 #     type: DT_RESOURCE
10377 #   }
10378 #   output_arg {
10379 #     name: "flow_out"
10380 #     type: DT_FLOAT
10381 #   }
10382 #   attr {
10383 #     name: "source"
10384 #     type: "string"
10385 #   }
10386 #   is_stateful: true
10387 # }
10388 # op {
10389 #   name: "TensorArrayGradWithShape"
10390 #   input_arg {
10391 #     name: "handle"
10392 #     type: DT_RESOURCE
10393 #   }
10394 #   input_arg {
10395 #     name: "flow_in"
10396 #     type: DT_FLOAT
10397 #   }
10398 #   input_arg {
10399 #     name: "shape_to_prepend"
10400 #     type: DT_INT32
10401 #   }
10402 #   output_arg {
10403 #     name: "grad_handle"
10404 #     type: DT_RESOURCE
10405 #   }
10406 #   output_arg {
10407 #     name: "flow_out"
10408 #     type: DT_FLOAT
10409 #   }
10410 #   attr {
10411 #     name: "source"
10412 #     type: "string"
10413 #   }
10414 #   is_stateful: true
10415 # }
10416 # op {
10417 #   name: "TensorArrayPack"
10418 #   input_arg {
10419 #     name: "handle"
10420 #     type: DT_STRING
10421 #     is_ref: true
10422 #   }
10423 #   input_arg {
10424 #     name: "flow_in"
10425 #     type: DT_FLOAT
10426 #   }
10427 #   output_arg {
10428 #     name: "value"
10429 #     type_attr: "dtype"
10430 #   }
10431 #   attr {
10432 #     name: "dtype"
10433 #     type: "type"
10434 #   }
10435 #   attr {
10436 #     name: "element_shape"
10437 #     type: "shape"
10438 #     default_value {
10439 #       shape {
10440 #         unknown_rank: true
10441 #       }
10442 #     }
10443 #   }
10444 #   deprecation {
10445 #     version: 16
10446 #     explanation: "Use TensorArrayGatherV3 with RangeOp"
10447 #   }
10448 # }
10449 # op {
10450 #   name: "TensorArrayRead"
10451 #   input_arg {
10452 #     name: "handle"
10453 #     type: DT_STRING
10454 #     is_ref: true
10455 #   }
10456 #   input_arg {
10457 #     name: "index"
10458 #     type: DT_INT32
10459 #   }
10460 #   input_arg {
10461 #     name: "flow_in"
10462 #     type: DT_FLOAT
10463 #   }
10464 #   output_arg {
10465 #     name: "value"
10466 #     type_attr: "dtype"
10467 #   }
10468 #   attr {
10469 #     name: "dtype"
10470 #     type: "type"
10471 #   }
10472 #   deprecation {
10473 #     version: 16
10474 #     explanation: "Use TensorArrayReadV3"
10475 #   }
10476 # }
10477 # op {
10478 #   name: "TensorArrayReadV2"
10479 #   input_arg {
10480 #     name: "handle"
10481 #     type: DT_STRING
10482 #   }
10483 #   input_arg {
10484 #     name: "index"
10485 #     type: DT_INT32
10486 #   }
10487 #   input_arg {
10488 #     name: "flow_in"
10489 #     type: DT_FLOAT
10490 #   }
10491 #   output_arg {
10492 #     name: "value"
10493 #     type_attr: "dtype"
10494 #   }
10495 #   attr {
10496 #     name: "dtype"
10497 #     type: "type"
10498 #   }
10499 #   deprecation {
10500 #     version: 26
10501 #     explanation: "Use TensorArrayReadV3"
10502 #   }
10503 # }
10504 # op {
10505 #   name: "TensorArrayReadV3"
10506 #   input_arg {
10507 #     name: "handle"
10508 #     type: DT_RESOURCE
10509 #   }
10510 #   input_arg {
10511 #     name: "index"
10512 #     type: DT_INT32
10513 #   }
10514 #   input_arg {
10515 #     name: "flow_in"
10516 #     type: DT_FLOAT
10517 #   }
10518 #   output_arg {
10519 #     name: "value"
10520 #     type_attr: "dtype"
10521 #   }
10522 #   attr {
10523 #     name: "dtype"
10524 #     type: "type"
10525 #   }
10526 #   is_stateful: true
10527 # }
10528 # op {
10529 #   name: "TensorArrayScatter"
10530 #   input_arg {
10531 #     name: "handle"
10532 #     type: DT_STRING
10533 #     is_ref: true
10534 #   }
10535 #   input_arg {
10536 #     name: "indices"
10537 #     type: DT_INT32
10538 #   }
10539 #   input_arg {
10540 #     name: "value"
10541 #     type_attr: "T"
10542 #   }
10543 #   input_arg {
10544 #     name: "flow_in"
10545 #     type: DT_FLOAT
10546 #   }
10547 #   output_arg {
10548 #     name: "flow_out"
10549 #     type: DT_FLOAT
10550 #   }
10551 #   attr {
10552 #     name: "T"
10553 #     type: "type"
10554 #   }
10555 #   deprecation {
10556 #     version: 19
10557 #     explanation: "Use TensorArrayGradV3"
10558 #   }
10559 # }
10560 # op {
10561 #   name: "TensorArrayScatterV2"
10562 #   input_arg {
10563 #     name: "handle"
10564 #     type: DT_STRING
10565 #   }
10566 #   input_arg {
10567 #     name: "indices"
10568 #     type: DT_INT32
10569 #   }
10570 #   input_arg {
10571 #     name: "value"
10572 #     type_attr: "T"
10573 #   }
10574 #   input_arg {
10575 #     name: "flow_in"
10576 #     type: DT_FLOAT
10577 #   }
10578 #   output_arg {
10579 #     name: "flow_out"
10580 #     type: DT_FLOAT
10581 #   }
10582 #   attr {
10583 #     name: "T"
10584 #     type: "type"
10585 #   }
10586 #   deprecation {
10587 #     version: 26
10588 #     explanation: "Use TensorArrayScatterV3"
10589 #   }
10590 # }
10591 # op {
10592 #   name: "TensorArrayScatterV3"
10593 #   input_arg {
10594 #     name: "handle"
10595 #     type: DT_RESOURCE
10596 #   }
10597 #   input_arg {
10598 #     name: "indices"
10599 #     type: DT_INT32
10600 #   }
10601 #   input_arg {
10602 #     name: "value"
10603 #     type_attr: "T"
10604 #   }
10605 #   input_arg {
10606 #     name: "flow_in"
10607 #     type: DT_FLOAT
10608 #   }
10609 #   output_arg {
10610 #     name: "flow_out"
10611 #     type: DT_FLOAT
10612 #   }
10613 #   attr {
10614 #     name: "T"
10615 #     type: "type"
10616 #   }
10617 #   is_stateful: true
10618 # }
10619 # op {
10620 #   name: "TensorArraySize"
10621 #   input_arg {
10622 #     name: "handle"
10623 #     type: DT_STRING
10624 #     is_ref: true
10625 #   }
10626 #   input_arg {
10627 #     name: "flow_in"
10628 #     type: DT_FLOAT
10629 #   }
10630 #   output_arg {
10631 #     name: "size"
10632 #     type: DT_INT32
10633 #   }
10634 #   deprecation {
10635 #     version: 16
10636 #     explanation: "Use TensorArraySizeV3"
10637 #   }
10638 # }
10639 # op {
10640 #   name: "TensorArraySizeV2"
10641 #   input_arg {
10642 #     name: "handle"
10643 #     type: DT_STRING
10644 #   }
10645 #   input_arg {
10646 #     name: "flow_in"
10647 #     type: DT_FLOAT
10648 #   }
10649 #   output_arg {
10650 #     name: "size"
10651 #     type: DT_INT32
10652 #   }
10653 #   deprecation {
10654 #     version: 26
10655 #     explanation: "Use TensorArraySizeV3"
10656 #   }
10657 # }
10658 # op {
10659 #   name: "TensorArraySizeV3"
10660 #   input_arg {
10661 #     name: "handle"
10662 #     type: DT_RESOURCE
10663 #   }
10664 #   input_arg {
10665 #     name: "flow_in"
10666 #     type: DT_FLOAT
10667 #   }
10668 #   output_arg {
10669 #     name: "size"
10670 #     type: DT_INT32
10671 #   }
10672 #   is_stateful: true
10673 # }
10674 # op {
10675 #   name: "TensorArraySplit"
10676 #   input_arg {
10677 #     name: "handle"
10678 #     type: DT_STRING
10679 #     is_ref: true
10680 #   }
10681 #   input_arg {
10682 #     name: "value"
10683 #     type_attr: "T"
10684 #   }
10685 #   input_arg {
10686 #     name: "lengths"
10687 #     type: DT_INT64
10688 #   }
10689 #   input_arg {
10690 #     name: "flow_in"
10691 #     type: DT_FLOAT
10692 #   }
10693 #   output_arg {
10694 #     name: "flow_out"
10695 #     type: DT_FLOAT
10696 #   }
10697 #   attr {
10698 #     name: "T"
10699 #     type: "type"
10700 #   }
10701 #   deprecation {
10702 #     version: 16
10703 #     explanation: "Use TensorArraySplitV3"
10704 #   }
10705 # }
10706 # op {
10707 #   name: "TensorArraySplitV2"
10708 #   input_arg {
10709 #     name: "handle"
10710 #     type: DT_STRING
10711 #   }
10712 #   input_arg {
10713 #     name: "value"
10714 #     type_attr: "T"
10715 #   }
10716 #   input_arg {
10717 #     name: "lengths"
10718 #     type: DT_INT64
10719 #   }
10720 #   input_arg {
10721 #     name: "flow_in"
10722 #     type: DT_FLOAT
10723 #   }
10724 #   output_arg {
10725 #     name: "flow_out"
10726 #     type: DT_FLOAT
10727 #   }
10728 #   attr {
10729 #     name: "T"
10730 #     type: "type"
10731 #   }
10732 #   deprecation {
10733 #     version: 26
10734 #     explanation: "Use TensorArraySplitV3"
10735 #   }
10736 # }
10737 # op {
10738 #   name: "TensorArraySplitV3"
10739 #   input_arg {
10740 #     name: "handle"
10741 #     type: DT_RESOURCE
10742 #   }
10743 #   input_arg {
10744 #     name: "value"
10745 #     type_attr: "T"
10746 #   }
10747 #   input_arg {
10748 #     name: "lengths"
10749 #     type: DT_INT64
10750 #   }
10751 #   input_arg {
10752 #     name: "flow_in"
10753 #     type: DT_FLOAT
10754 #   }
10755 #   output_arg {
10756 #     name: "flow_out"
10757 #     type: DT_FLOAT
10758 #   }
10759 #   attr {
10760 #     name: "T"
10761 #     type: "type"
10762 #   }
10763 #   is_stateful: true
10764 # }
10765 # op {
10766 #   name: "TensorArrayUnpack"
10767 #   input_arg {
10768 #     name: "handle"
10769 #     type: DT_STRING
10770 #     is_ref: true
10771 #   }
10772 #   input_arg {
10773 #     name: "value"
10774 #     type_attr: "T"
10775 #   }
10776 #   input_arg {
10777 #     name: "flow_in"
10778 #     type: DT_FLOAT
10779 #   }
10780 #   output_arg {
10781 #     name: "flow_out"
10782 #     type: DT_FLOAT
10783 #   }
10784 #   attr {
10785 #     name: "T"
10786 #     type: "type"
10787 #   }
10788 #   deprecation {
10789 #     version: 20
10790 #     explanation: "Use TensorArrayScatterV3 with RangeOp"
10791 #   }
10792 # }
10793 # op {
10794 #   name: "TensorArrayV2"
10795 #   input_arg {
10796 #     name: "size"
10797 #     type: DT_INT32
10798 #   }
10799 #   output_arg {
10800 #     name: "handle"
10801 #     type: DT_STRING
10802 #   }
10803 #   attr {
10804 #     name: "dtype"
10805 #     type: "type"
10806 #   }
10807 #   attr {
10808 #     name: "element_shape"
10809 #     type: "shape"
10810 #     default_value {
10811 #       shape {
10812 #         unknown_rank: true
10813 #       }
10814 #     }
10815 #   }
10816 #   attr {
10817 #     name: "dynamic_size"
10818 #     type: "bool"
10819 #     default_value {
10820 #       b: false
10821 #     }
10822 #   }
10823 #   attr {
10824 #     name: "clear_after_read"
10825 #     type: "bool"
10826 #     default_value {
10827 #       b: true
10828 #     }
10829 #   }
10830 #   attr {
10831 #     name: "tensor_array_name"
10832 #     type: "string"
10833 #     default_value {
10834 #       s: ""
10835 #     }
10836 #   }
10837 #   deprecation {
10838 #     version: 26
10839 #     explanation: "Use TensorArrayV3"
10840 #   }
10841 #   is_stateful: true
10842 # }
10843 # op {
10844 #   name: "TensorArrayV3"
10845 #   input_arg {
10846 #     name: "size"
10847 #     type: DT_INT32
10848 #   }
10849 #   output_arg {
10850 #     name: "handle"
10851 #     type: DT_RESOURCE
10852 #   }
10853 #   output_arg {
10854 #     name: "flow"
10855 #     type: DT_FLOAT
10856 #   }
10857 #   attr {
10858 #     name: "dtype"
10859 #     type: "type"
10860 #   }
10861 #   attr {
10862 #     name: "element_shape"
10863 #     type: "shape"
10864 #     default_value {
10865 #       shape {
10866 #         unknown_rank: true
10867 #       }
10868 #     }
10869 #   }
10870 #   attr {
10871 #     name: "dynamic_size"
10872 #     type: "bool"
10873 #     default_value {
10874 #       b: false
10875 #     }
10876 #   }
10877 #   attr {
10878 #     name: "clear_after_read"
10879 #     type: "bool"
10880 #     default_value {
10881 #       b: true
10882 #     }
10883 #   }
10884 #   attr {
10885 #     name: "identical_element_shapes"
10886 #     type: "bool"
10887 #     default_value {
10888 #       b: false
10889 #     }
10890 #   }
10891 #   attr {
10892 #     name: "tensor_array_name"
10893 #     type: "string"
10894 #     default_value {
10895 #       s: ""
10896 #     }
10897 #   }
10898 #   is_stateful: true
10899 # }
10900 # op {
10901 #   name: "TensorArrayWrite"
10902 #   input_arg {
10903 #     name: "handle"
10904 #     type: DT_STRING
10905 #     is_ref: true
10906 #   }
10907 #   input_arg {
10908 #     name: "index"
10909 #     type: DT_INT32
10910 #   }
10911 #   input_arg {
10912 #     name: "value"
10913 #     type_attr: "T"
10914 #   }
10915 #   input_arg {
10916 #     name: "flow_in"
10917 #     type: DT_FLOAT
10918 #   }
10919 #   output_arg {
10920 #     name: "flow_out"
10921 #     type: DT_FLOAT
10922 #   }
10923 #   attr {
10924 #     name: "T"
10925 #     type: "type"
10926 #   }
10927 #   deprecation {
10928 #     version: 16
10929 #     explanation: "Use TensorArrayWriteV3"
10930 #   }
10931 # }
10932 # op {
10933 #   name: "TensorArrayWriteV2"
10934 #   input_arg {
10935 #     name: "handle"
10936 #     type: DT_STRING
10937 #   }
10938 #   input_arg {
10939 #     name: "index"
10940 #     type: DT_INT32
10941 #   }
10942 #   input_arg {
10943 #     name: "value"
10944 #     type_attr: "T"
10945 #   }
10946 #   input_arg {
10947 #     name: "flow_in"
10948 #     type: DT_FLOAT
10949 #   }
10950 #   output_arg {
10951 #     name: "flow_out"
10952 #     type: DT_FLOAT
10953 #   }
10954 #   attr {
10955 #     name: "T"
10956 #     type: "type"
10957 #   }
10958 #   deprecation {
10959 #     version: 26
10960 #     explanation: "Use TensorArrayWriteV3"
10961 #   }
10962 # }
10963 # op {
10964 #   name: "TensorArrayWriteV3"
10965 #   input_arg {
10966 #     name: "handle"
10967 #     type: DT_RESOURCE
10968 #   }
10969 #   input_arg {
10970 #     name: "index"
10971 #     type: DT_INT32
10972 #   }
10973 #   input_arg {
10974 #     name: "value"
10975 #     type_attr: "T"
10976 #   }
10977 #   input_arg {
10978 #     name: "flow_in"
10979 #     type: DT_FLOAT
10980 #   }
10981 #   output_arg {
10982 #     name: "flow_out"
10983 #     type: DT_FLOAT
10984 #   }
10985 #   attr {
10986 #     name: "T"
10987 #     type: "type"
10988 #   }
10989 #   is_stateful: true
10990 # }
10991 # op {
10992 #   name: "Unstage"
10993 #   output_arg {
10994 #     name: "values"
10995 #     type_list_attr: "dtypes"
10996 #   }
10997 #   attr {
10998 #     name: "capacity"
10999 #     type: "int"
11000 #     default_value {
11001 #       i: 0
11002 #     }
11003 #     has_minimum: true
11004 #   }
11005 #   attr {
11006 #     name: "memory_limit"
11007 #     type: "int"
11008 #     default_value {
11009 #       i: 0
11010 #     }
11011 #     has_minimum: true
11012 #   }
11013 #   attr {
11014 #     name: "dtypes"
11015 #     type: "list(type)"
11016 #     has_minimum: true
11017 #     minimum: 1
11018 #   }
11019 #   attr {
11020 #     name: "container"
11021 #     type: "string"
11022 #     default_value {
11023 #       s: ""
11024 #     }
11025 #   }
11026 #   attr {
11027 #     name: "shared_name"
11028 #     type: "string"
11029 #     default_value {
11030 #       s: ""
11031 #     }
11032 #   }
11033 #   is_stateful: true
11034 # }
# Module-level OpDefLibrary holding every op definition this file wraps.
# The argument is a binary-serialized `OpList` protobuf (the human-readable
# text rendering of the same data is the large comment block above).  The
# escaped bytes are MACHINE GENERATED from data_flow_ops.cc — do not edit
# them by hand: single characters are significant (e.g. the literal space
# after `\030` encodes a varint default of 32, and trailing spaces inside
# deprecation strings such as "Use TensorArrayV3" are part of the message).
_op_def_lib = _InitOpDefLibrary(b"\nr\n\030AccumulatorApplyGradient\022\r\n\006handle\030\007\200\001\001\022\016\n\nlocal_step\030\t\022\021\n\010gradient\"\005dtype\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n?\n\031AccumulatorNumAccumulated\022\r\n\006handle\030\007\200\001\001\032\023\n\017num_accumulated\030\003\n>\n\030AccumulatorSetGlobalStep\022\r\n\006handle\030\007\200\001\001\022\023\n\017new_global_step\030\t\nr\n\027AccumulatorTakeGradient\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_required\030\003\032\020\n\007average\"\005dtype\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\255\001\n\007Barrier\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\nB\n\014BarrierClose\022\r\n\006handle\030\007\200\001\001\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\n0\n\025BarrierIncompleteSize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n\\\n\021BarrierInsertMany\022\r\n\006handle\030\007\200\001\001\022\010\n\004keys\030\007\022\013\n\006values\"\001T\"\t\n\001T\022\004type\"\026\n\017component_index\022\003int\n+\n\020BarrierReadySize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n\347\001\n\017BarrierTakeMany\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_elements\030\003\032\013\n\007indices\030\t\032\010\n\004keys\030\007\032\031\n\006values2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\035\n\021allow_small_batch\022\004bool\032\002(\000\"\037\n\023wait_for_incomplete\022\004bool\032\002(\000\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\305\001\n\026ConditionalAccumulator\032\r\n\006handle\030\007\200\001\001\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\"/\n\016reduction_type\022\006string\032\006\022\004MEAN:\r\n\013\022\004MEAN\022\003SUM\210\001\001\n$\n\023DeleteSessionTensor\022\n\n\006handle\030\007\210\001\001\nq\n\020DynamicPartition\022\t\n\004data\"\001T\022\016\n\npartitions\030\003\032\034\n\007outputs\"\001T*\016num_partitions\"\031\n\016num_partitions\022\003int(\0010\001\"\t\n\001T\022\004type\nS\n\rDynamicStitch\022\016\n\007indices\030\003*\001N\022\014\n\004data\"\001T*\001N\032\013\n\006merged\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\n\257\001\n\tFIFOQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\256\001\n\013FIFOQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n+\n\tFakeQueue\022\014\n\010resource\030\024\032\r\n\006handle\030\007\200\001\001\210\001\001\n8\n\020GetSessionHandle\022\n\n\005value\"\001T\032\n\n\006handle\030\007\"\t\n\001T\022\004type\210\001\001\n:\n\022GetSessionHandleV2\022\n\n\005value\"\001T\032\n\n\006handle\030\024\"\t\n\001T\022\004type\210\001\001\n@\n\020GetSessionTensor\022\n\n\006handle\030\007\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\210\001\001\n\211\001\n\010MapClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\234\001\n\021MapIncompleteSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\264\001\n\007MapPeek\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\222\001\n\007MapSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\325\001\n\010MapStage\022\007\n\003key\030\t\022\013\n\007indices\030\003\022\025\n\006values2\013fake_dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\035\n\013fake_dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\267\001\n\nMapUnstage\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\274\001\n\017MapUnstageNoKey\022\013\n\007indices\030\003\032\007\n\003key\030\t\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\220\001\n\017OrderedMapClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\243\001\n\030OrderedMapIncompleteSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\273\001\n\016OrderedMapPeek\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\231\001\n\016OrderedMapSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\334\001\n\017OrderedMapStage\022\007\n\003key\030\t\022\013\n\007indices\030\003\022\025\n\006values2\013fake_dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\035\n\013fake_dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\276\001\n\021OrderedMapUnstage\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\303\001\n\026OrderedMapUnstageNoKey\022\013\n\007indices\030\003\032\007\n\003key\030\t\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\266\001\n\020PaddingFIFOQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\265\001\n\022PaddingFIFOQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n[\n\025ParallelDynamicStitch\022\016\n\007indices\030\003*\001N\022\014\n\004data\"\001T*\001N\032\013\n\006merged\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\n\261\001\n\rPriorityQueue\032\r\n\006handle\030\007\200\001\001\"#\n\017component_types\022\nlist(type)\032\002\n\000(\001\"\027\n\006shapes\022\013list(shape)(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\260\001\n\017PriorityQueueV2\032\n\n\006handle\030\024\"#\n\017component_types\022\nlist(type)\032\002\n\000(\001\"\027\n\006shapes\022\013list(shape)(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n@\n\nQueueClose\022\r\n\006handle\030\007\200\001\001\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\nB\n\014QueueCloseV2\022\n\n\006handle\030\024\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\210\001\001\n\177\n\014QueueDequeue\022\r\n\006handle\030\007\200\001\001\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\212\001\n\020QueueDequeueMany\022\r\n\006handle\030\007\200\001\001\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\214\001\n\022QueueDequeueManyV2\022\n\n\006handle\030\024\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n\212\001\n\020QueueDequeueUpTo\022\r\n\006handle\030\007\200\001\001\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\214\001\n\022QueueDequeueUpToV2\022\n\n\006handle\030\024\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n\201\001\n\016QueueDequeueV2\022\n\n\006handle\030\024\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\nw\n\014QueueEnqueue\022\r\n\006handle\030\007\200\001\001\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n{\n\020QueueEnqueueMany\022\r\n\006handle\030\007\200\001\001\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n}\n\022QueueEnqueueManyV2\022\n\n\006handle\030\024\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\ny\n\016QueueEnqueueV2\022\n\n\006handle\030\024\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n-\n\rQueueIsClosed\022\r\n\006handle\030\007\200\001\001\032\r\n\tis_closed\030\n\n/\n\017QueueIsClosedV2\022\n\n\006handle\030\024\032\r\n\tis_closed\030\n\210\001\001\n$\n\tQueueSize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n&\n\013QueueSizeV2\022\n\n\006handle\030\024\032\010\n\004size\030\003\210\001\001\n\371\001\n\022RandomShuffleQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\034\n\021min_after_dequeue\022\003int\032\002\030\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\370\001\n\024RandomShuffleQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\034\n\021min_after_dequeue\022\003int\032\002\030\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\357\001\n\013RecordInput\032\013\n\007records\030\007\"\026\n\014file_pattern\022\006string\"\034\n\020file_random_seed\022\003int\032\003\030\255\002\"(\n\030file_shuffle_shift_ratio\022\005float\032\005%\000\000\000\000\"\034\n\020file_buffer_size\022\003int\032\003\030\220N\"\033\n\020file_parallelism\022\003int\032\002\030\020\"\025\n\nbatch_size\022\003int\032\002\030 \"\036\n\020compression_type\022\006string\032\002\022\000\210\001\001\n\302\001\n\036SparseAccumulatorApplyGradient\022\r\n\006handle\030\007\200\001\001\022\016\n\nlocal_step\030\t\022\024\n\020gradient_indices\030\t\022\030\n\017gradient_values\"\005dtype\022\022\n\016gradient_shape\030\t\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\017has_known_shape\022\004bool\n\217\001\n\035SparseAccumulatorTakeGradient\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_required\030\003\032\013\n\007indices\030\t\032\017\n\006values\"\005dtype\032\t\n\005shape\030\t\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\313\001\n\034SparseConditionalAccumulator\032\r\n\006handle\030\007\200\001\001\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\"/\n\016reduction_type\022\006string\032\006\022\004MEAN:\r\n\013\022\004MEAN\022\003SUM\210\001\001\nF\n\005Stack\032\r\n\006handle\030\007\200\001\001\"\021\n\telem_type\022\004type\"\030\n\nstack_name\022\006string\032\002\022\000\210\001\001\n\033\n\nStackClose\022\r\n\006handle\030\007\200\001\001\n\035\n\014StackCloseV2\022\n\n\006handle\030\024\210\001\001\n?\n\010StackPop\022\r\n\006handle\030\007\200\001\001\032\021\n\004elem\"\telem_type\"\021\n\telem_type\022\004type\nA\n\nStackPopV2\022\n\n\006handle\030\024\032\021\n\004elem\"\telem_type\"\021\n\telem_type\022\004type\210\001\001\nV\n\tStackPush\022\r\n\006handle\030\007\200\001\001\022\t\n\004elem\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\027\n\013swap_memory\022\004bool\032\002(\000\nX\n\013StackPushV2\022\n\n\006handle\030\024\022\t\n\004elem\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\027\n\013swap_memory\022\004bool\032\002(\000\210\001\001\nS\n\007StackV2\022\014\n\010max_size\030\003\032\n\n\006handle\030\024\"\021\n\telem_type\022\004type\"\030\n\nstack_name\022\006string\032\002\022\000\210\001\001\n\234\001\n\005Stage\022\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\213\001\n\nStageClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\253\001\n\tStagePeek\022\t\n\005index\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\224\001\n\tStageSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\306\001\n\013TensorArray\022\010\n\004size\030\003\032\r\n\006handle\030\007\200\001\001\"\r\n\005dtype\022\004type\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"\037\n\021tensor_array_name\022\006string\032\002\022\000\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\025\010\020\022\021Use TensorArrayV3\210\001\001\n=\n\020TensorArrayClose\022\r\n\006handle\030\007\200\001\001B\032\010\020\022\026Use TensorArrayCloseV3\n<\n\022TensorArrayCloseV2\022\n\n\006handle\030\007B\032\010\032\022\026Use TensorArrayCloseV3\n#\n\022TensorArrayCloseV3\022\n\n\006handle\030\024\210\001\001\n\234\001\n\021TensorArrayConcat\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001B\031\010\020\022\025Use TensorArrayGradV3\n\200\001\n\023TensorArrayConcatV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001\n\203\001\n\023TensorArrayConcatV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001\210\001\001\n\226\001\n\021TensorArrayGather\022\r\n\006handle\030\007\200\001\001\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\033\010\020\022\027Use TensorArrayGatherV3\n\225\001\n\023TensorArrayGatherV2\022\n\n\006handle\030\007\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\033\010\032\022\027Use TensorArrayGatherV3\n{\n\023TensorArrayGatherV3\022\n\n\006handle\030\024\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\210\001\001\nn\n\017TensorArrayGrad\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\022\n\013grad_handle\030\007\200\001\001\"\020\n\006source\022\006stringB\031\010\020\022\025Use TensorArrayGradV3\210\001\001\nm\n\021TensorArrayGradV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\017\n\013grad_handle\030\007\"\020\n\006source\022\006stringB\031\010\032\022\025Use TensorArrayGradV3\210\001\001\n`\n\021TensorArrayGradV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\017\n\013grad_handle\030\024\032\014\n\010flow_out\030\001\"\020\n\006source\022\006string\210\001\001\n}\n\030TensorArrayGradWithShape\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\022\024\n\020shape_to_prepend\030\003\032\017\n\013grad_handle\030\024\032\014\n\010flow_out\030\001\"\020\n\006source\022\006string\210\001\001\n\224\001\n\017TensorArrayPack\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B(\010\020\022$Use TensorArrayGatherV3 with RangeOp\nr\n\017TensorArrayRead\022\r\n\006handle\030\007\200\001\001\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004typeB\031\010\020\022\025Use TensorArrayReadV3\nq\n\021TensorArrayReadV2\022\n\n\006handle\030\007\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004typeB\031\010\032\022\025Use TensorArrayReadV3\nY\n\021TensorArrayReadV3\022\n\n\006handle\030\024\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\210\001\001\n}\n\022TensorArrayScatter\022\r\n\006handle\030\007\200\001\001\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\031\010\023\022\025Use TensorArrayGradV3\n\177\n\024TensorArrayScatterV2\022\n\n\006handle\030\007\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\034\010\032\022\030Use TensorArrayScatterV3\nd\n\024TensorArrayScatterV3\022\n\n\006handle\030\024\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\nR\n\017TensorArraySize\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\010\n\004size\030\003B\031\010\020\022\025Use TensorArraySizeV3\nQ\n\021TensorArraySizeV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\010\n\004size\030\003B\031\010\032\022\025Use TensorArraySizeV3\n9\n\021TensorArraySizeV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\010\n\004size\030\003\210\001\001\n|\n\020TensorArraySplit\022\r\n\006handle\030\007\200\001\001\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\020\022\026Use TensorArraySplitV3\n{\n\022TensorArraySplitV2\022\n\n\006handle\030\007\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\032\022\026Use TensorArraySplitV3\nb\n\022TensorArraySplitV3\022\n\n\006handle\030\024\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\n\177\n\021TensorArrayUnpack\022\r\n\006handle\030\007\200\001\001\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB)\010\024\022%Use TensorArrayScatterV3 with RangeOp\n\305\001\n\rTensorArrayV2\022\010\n\004size\030\003\032\n\n\006handle\030\007\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"\037\n\021tensor_array_name\022\006string\032\002\022\000B\025\010\032\022\021Use TensorArrayV3\210\001\001\n\336\001\n\rTensorArrayV3\022\010\n\004size\030\003\032\n\n\006handle\030\024\032\010\n\004flow\030\001\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"$\n\030identical_element_shapes\022\004bool\032\002(\000\"\037\n\021tensor_array_name\022\006string\032\002\022\000\210\001\001\nz\n\020TensorArrayWrite\022\r\n\006handle\030\007\200\001\001\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\020\022\026Use TensorArrayWriteV3\ny\n\022TensorArrayWriteV2\022\n\n\006handle\030\007\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\032\022\026Use TensorArrayWriteV3\n`\n\022TensorArrayWriteV3\022\n\n\006handle\030\024\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\n\236\001\n\007Unstage\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001")